hip_filename : string (length 5 to 84)
hip_content  : string (length 79 to 9.69M)
cuda_filename: string (length 4 to 83)
cuda_content : string (length 19 to 9.69M)
563cd9cd86b68262f2683eea09ae3bc26e89964e.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019 by Contributors #include <gtest/gtest.h> #include <xgboost/data.h> #include <xgboost/json.h> #include <thrust/device_vector.h> #include <memory> #include "../../../src/common/bitfield.h" #include "../../../src/common/device_helpers.cuh" #include "../../../src/data/simple_csr_source.h" namespace xgboost { TEST(SimpleCSRSource, FromColumnarDense) { constexpr size_t kRows = 16; Json column { Object() }; std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))}; column["shape"] = Array(j_shape); column["strides"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(4)))}); thrust::device_vector<float> d_data(kRows); for (size_t i = 0; i < d_data.size(); ++i) { d_data[i] = i * 2.0; } auto p_d_data = dh::Raw(d_data); std::vector<Json> j_data { Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))), Json(Boolean(false))}; column["data"] = j_data; column["version"] = Integer(static_cast<Integer::Int>(1)); column["typestr"] = String("<f4"); Json column_arr {Array{std::vector<Json>{column}}}; std::stringstream ss; Json::Dump(column_arr, &ss); std::string str = ss.str(); std::unique_ptr<data::SimpleCSRSource> source (new data::SimpleCSRSource()); source->CopyFrom(str.c_str()); auto const& data = source->page_.data.HostVector(); auto const& offset = source->page_.offset.HostVector(); for (size_t i = 0; i < kRows; ++i) { auto e = data[i]; ASSERT_NEAR(e.fvalue, i * 2.0, kRtEps); ASSERT_EQ(e.index, 0); // feature 0 } ASSERT_EQ(offset.back(), 16); for (size_t i = 0; i < kRows + 1; ++i) { ASSERT_EQ(offset[i], i); } } TEST(SimpleCSRSource, FromColumnarWithEmptyRows) { // In this test we construct a data storage similar to cudf constexpr size_t kRows = 102; constexpr size_t kCols = 24; constexpr size_t kMissingRows = 3; std::vector<Json> v_columns (kCols); std::vector<dh::device_vector<float>> columns_data(kCols); std::vector<dh::device_vector<unsigned char>> column_bitfields(kCols); unsigned char constexpr kUCOne = 1; for (size_t i = 0; i < kCols; ++i) { auto& col = v_columns[i]; col = Object(); auto& data = columns_data[i]; data.resize(kRows); thrust::sequence(data.begin(), data.end(), 0); dh::safe_cuda(hipDeviceSynchronize()); dh::safe_cuda(hipGetLastError()); ASSERT_EQ(data.size(), kRows); auto p_d_data = raw_pointer_cast(data.data()); std::vector<Json> j_data { Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))), Json(Boolean(false))}; col["data"] = j_data; std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))}; col["shape"] = Array(j_shape); col["version"] = Integer(static_cast<Integer::Int>(1)); col["typestr"] = String("<f4"); // Construct the mask object. 
col["mask"] = Object(); auto& j_mask = col["mask"]; auto& mask_storage = column_bitfields[i]; mask_storage.resize(16); // 16 bytes mask_storage[0] = ~(kUCOne << 2); // 3^th row is missing mask_storage[1] = ~(kUCOne << 3); // 12^th row is missing size_t last_ind = 12; mask_storage[last_ind] = ~(kUCOne << 5); std::set<size_t> missing_row_index {0, 1, last_ind}; for (size_t i = 0; i < mask_storage.size(); ++i) { if (missing_row_index.find(i) == missing_row_index.cend()) { // all other rows are valid mask_storage[i] = ~0; } } j_mask["data"] = std::vector<Json>{ Json(Integer(reinterpret_cast<Integer::Int>(mask_storage.data().get()))), Json(Boolean(false))}; j_mask["shape"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(16)))}); j_mask["typestr"] = String("|i1"); j_mask["null_count"] = Json(Integer(static_cast<Integer::Int>(kMissingRows))); } Json column_arr {Array(v_columns)}; std::stringstream ss; Json::Dump(column_arr, &ss); std::string str = ss.str(); std::unique_ptr<data::SimpleCSRSource> source (new data::SimpleCSRSource()); source->CopyFrom(str.c_str()); auto const& data = source->page_.data.HostVector(); auto const& offset = source->page_.offset.HostVector(); ASSERT_EQ(offset.size(), kRows + 1); for (size_t i = 1; i < offset.size(); ++i) { for (size_t j = offset[i-1]; j < offset[i]; ++j) { ASSERT_EQ(data[j].index, j % kCols); ASSERT_NEAR(data[j].fvalue, i - 1, kRtEps); } } } TEST(SimpleCSRSource, FromColumnarSparse) { constexpr size_t kRows = 32; constexpr size_t kCols = 2; unsigned char constexpr kUCOne = 1; std::vector<dh::device_vector<float>> columns_data(kCols); std::vector<dh::device_vector<unsigned char>> column_bitfields(kCols); { // column 0 auto& mask = column_bitfields[0]; mask.resize(8); for (size_t j = 0; j < mask.size(); ++j) { mask[j] = ~0; } mask[0] = ~(kUCOne << 2); } { // column 1 auto& mask = column_bitfields[1]; mask.resize(8); for (size_t j = 0; j < mask.size(); ++j) { mask[j] = ~0; } mask[2] = ~(kUCOne << 3); } for (size_t c = 0; c < kCols; ++c) { columns_data[c].resize(kRows); thrust::sequence(columns_data[c].begin(), columns_data[c].end(), 0); } std::vector<Json> j_columns(kCols); for (size_t c = 0; c < kCols; ++c) { auto& column = j_columns[c]; column = Object(); column["version"] = Integer(static_cast<Integer::Int>(1)); column["typestr"] = String("<f4"); auto p_d_data = raw_pointer_cast(columns_data[c].data()); std::vector<Json> j_data { Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))), Json(Boolean(false))}; column["data"] = j_data; std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))}; column["shape"] = Array(j_shape); column["version"] = Integer(static_cast<Integer::Int>(1)); column["typestr"] = String("<f4"); column["mask"] = Object(); auto& j_mask = column["mask"]; j_mask["data"] = std::vector<Json>{ Json(Integer(reinterpret_cast<Integer::Int>(column_bitfields[c].data().get()))), Json(Boolean(false))}; j_mask["shape"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(8)))}); j_mask["typestr"] = String("|i1"); j_mask["null_count"] = Json(Integer(static_cast<Integer::Int>(1))); } Json column_arr {Array(j_columns)}; std::stringstream ss; Json::Dump(column_arr, &ss); std::string str = ss.str(); std::unique_ptr<data::SimpleCSRSource> source (new data::SimpleCSRSource()); source->CopyFrom(str.c_str()); auto const& data = source->page_.data.HostVector(); auto const& offset = source->page_.offset.HostVector(); ASSERT_EQ(offset.size(), kRows + 1); ASSERT_EQ(data[4].index, 1); ASSERT_EQ(data[4].fvalue, 2); 
ASSERT_EQ(data[37].index, 0); ASSERT_EQ(data[37].fvalue, 19); } } // namespace xgboost
563cd9cd86b68262f2683eea09ae3bc26e89964e.cu
// Copyright (c) 2019 by Contributors #include <gtest/gtest.h> #include <xgboost/data.h> #include <xgboost/json.h> #include <thrust/device_vector.h> #include <memory> #include "../../../src/common/bitfield.h" #include "../../../src/common/device_helpers.cuh" #include "../../../src/data/simple_csr_source.h" namespace xgboost { TEST(SimpleCSRSource, FromColumnarDense) { constexpr size_t kRows = 16; Json column { Object() }; std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))}; column["shape"] = Array(j_shape); column["strides"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(4)))}); thrust::device_vector<float> d_data(kRows); for (size_t i = 0; i < d_data.size(); ++i) { d_data[i] = i * 2.0; } auto p_d_data = dh::Raw(d_data); std::vector<Json> j_data { Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))), Json(Boolean(false))}; column["data"] = j_data; column["version"] = Integer(static_cast<Integer::Int>(1)); column["typestr"] = String("<f4"); Json column_arr {Array{std::vector<Json>{column}}}; std::stringstream ss; Json::Dump(column_arr, &ss); std::string str = ss.str(); std::unique_ptr<data::SimpleCSRSource> source (new data::SimpleCSRSource()); source->CopyFrom(str.c_str()); auto const& data = source->page_.data.HostVector(); auto const& offset = source->page_.offset.HostVector(); for (size_t i = 0; i < kRows; ++i) { auto e = data[i]; ASSERT_NEAR(e.fvalue, i * 2.0, kRtEps); ASSERT_EQ(e.index, 0); // feature 0 } ASSERT_EQ(offset.back(), 16); for (size_t i = 0; i < kRows + 1; ++i) { ASSERT_EQ(offset[i], i); } } TEST(SimpleCSRSource, FromColumnarWithEmptyRows) { // In this test we construct a data storage similar to cudf constexpr size_t kRows = 102; constexpr size_t kCols = 24; constexpr size_t kMissingRows = 3; std::vector<Json> v_columns (kCols); std::vector<dh::device_vector<float>> columns_data(kCols); std::vector<dh::device_vector<unsigned char>> column_bitfields(kCols); unsigned char constexpr kUCOne = 1; for (size_t i = 0; i < kCols; ++i) { auto& col = v_columns[i]; col = Object(); auto& data = columns_data[i]; data.resize(kRows); thrust::sequence(data.begin(), data.end(), 0); dh::safe_cuda(cudaDeviceSynchronize()); dh::safe_cuda(cudaGetLastError()); ASSERT_EQ(data.size(), kRows); auto p_d_data = raw_pointer_cast(data.data()); std::vector<Json> j_data { Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))), Json(Boolean(false))}; col["data"] = j_data; std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))}; col["shape"] = Array(j_shape); col["version"] = Integer(static_cast<Integer::Int>(1)); col["typestr"] = String("<f4"); // Construct the mask object. 
col["mask"] = Object(); auto& j_mask = col["mask"]; auto& mask_storage = column_bitfields[i]; mask_storage.resize(16); // 16 bytes mask_storage[0] = ~(kUCOne << 2); // 3^th row is missing mask_storage[1] = ~(kUCOne << 3); // 12^th row is missing size_t last_ind = 12; mask_storage[last_ind] = ~(kUCOne << 5); std::set<size_t> missing_row_index {0, 1, last_ind}; for (size_t i = 0; i < mask_storage.size(); ++i) { if (missing_row_index.find(i) == missing_row_index.cend()) { // all other rows are valid mask_storage[i] = ~0; } } j_mask["data"] = std::vector<Json>{ Json(Integer(reinterpret_cast<Integer::Int>(mask_storage.data().get()))), Json(Boolean(false))}; j_mask["shape"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(16)))}); j_mask["typestr"] = String("|i1"); j_mask["null_count"] = Json(Integer(static_cast<Integer::Int>(kMissingRows))); } Json column_arr {Array(v_columns)}; std::stringstream ss; Json::Dump(column_arr, &ss); std::string str = ss.str(); std::unique_ptr<data::SimpleCSRSource> source (new data::SimpleCSRSource()); source->CopyFrom(str.c_str()); auto const& data = source->page_.data.HostVector(); auto const& offset = source->page_.offset.HostVector(); ASSERT_EQ(offset.size(), kRows + 1); for (size_t i = 1; i < offset.size(); ++i) { for (size_t j = offset[i-1]; j < offset[i]; ++j) { ASSERT_EQ(data[j].index, j % kCols); ASSERT_NEAR(data[j].fvalue, i - 1, kRtEps); } } } TEST(SimpleCSRSource, FromColumnarSparse) { constexpr size_t kRows = 32; constexpr size_t kCols = 2; unsigned char constexpr kUCOne = 1; std::vector<dh::device_vector<float>> columns_data(kCols); std::vector<dh::device_vector<unsigned char>> column_bitfields(kCols); { // column 0 auto& mask = column_bitfields[0]; mask.resize(8); for (size_t j = 0; j < mask.size(); ++j) { mask[j] = ~0; } mask[0] = ~(kUCOne << 2); } { // column 1 auto& mask = column_bitfields[1]; mask.resize(8); for (size_t j = 0; j < mask.size(); ++j) { mask[j] = ~0; } mask[2] = ~(kUCOne << 3); } for (size_t c = 0; c < kCols; ++c) { columns_data[c].resize(kRows); thrust::sequence(columns_data[c].begin(), columns_data[c].end(), 0); } std::vector<Json> j_columns(kCols); for (size_t c = 0; c < kCols; ++c) { auto& column = j_columns[c]; column = Object(); column["version"] = Integer(static_cast<Integer::Int>(1)); column["typestr"] = String("<f4"); auto p_d_data = raw_pointer_cast(columns_data[c].data()); std::vector<Json> j_data { Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))), Json(Boolean(false))}; column["data"] = j_data; std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))}; column["shape"] = Array(j_shape); column["version"] = Integer(static_cast<Integer::Int>(1)); column["typestr"] = String("<f4"); column["mask"] = Object(); auto& j_mask = column["mask"]; j_mask["data"] = std::vector<Json>{ Json(Integer(reinterpret_cast<Integer::Int>(column_bitfields[c].data().get()))), Json(Boolean(false))}; j_mask["shape"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(8)))}); j_mask["typestr"] = String("|i1"); j_mask["null_count"] = Json(Integer(static_cast<Integer::Int>(1))); } Json column_arr {Array(j_columns)}; std::stringstream ss; Json::Dump(column_arr, &ss); std::string str = ss.str(); std::unique_ptr<data::SimpleCSRSource> source (new data::SimpleCSRSource()); source->CopyFrom(str.c_str()); auto const& data = source->page_.data.HostVector(); auto const& offset = source->page_.offset.HostVector(); ASSERT_EQ(offset.size(), kRows + 1); ASSERT_EQ(data[4].index, 1); ASSERT_EQ(data[4].fvalue, 2); 
ASSERT_EQ(data[37].index, 0); ASSERT_EQ(data[37].fvalue, 19); } } // namespace xgboost
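The tests above build cuDF-style columnar buffers by hand, including per-column validity masks in which a cleared bit marks a missing row. As a minimal host-side sketch of the bit layout those masks assume (LSB-first within each byte, which is what clearing bit 2 of byte 0 for the 3rd row implies), the helper below shows how a row index maps onto a mask byte and bit. RowIsValid is a hypothetical name used only for illustration; it is not part of the xgboost sources.

#include <cassert>
#include <cstddef>
#include <cstring>

// Hypothetical helper: row r is valid iff bit (r % 8) of byte (r / 8) is set,
// matching the mask construction in FromColumnarWithEmptyRows above.
bool RowIsValid(const unsigned char* mask, std::size_t row) {
  return (mask[row / 8] >> (row % 8)) & 1u;
}

int main() {
  unsigned char mask[16];
  std::memset(mask, 0xFF, sizeof(mask));               // start with all 102 rows valid
  mask[0]  = static_cast<unsigned char>(~(1u << 2));   // row 2  (the "3rd" row)  missing
  mask[1]  = static_cast<unsigned char>(~(1u << 3));   // row 11 (the "12th" row) missing
  mask[12] = static_cast<unsigned char>(~(1u << 5));   // row 101 (the last row)  missing

  assert(!RowIsValid(mask, 2) && !RowIsValid(mask, 11) && !RowIsValid(mask, 101));
  assert(RowIsValid(mask, 0) && RowIsValid(mask, 12) && RowIsValid(mask, 100));
  return 0;
}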
2ade9de3754c64e5aa4ec1bac4e53ec4c62853e6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdint.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "GPUTUPLE.h" #include "GPUetc/common/GNValue.h" #include "GPUetc/expressions/Gcomparisonexpression.h" using namespace voltdb; extern "C" { __global__ void count( COLUMNDATA *lt, COLUMNDATA *prt, uint *count, GComparisonExpression ex, int *r_p, int p_num, int left ) { int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < left){ GNValue tlgnv; if(x == left-1){ tlgnv = lt[x].gn; }else{ tlgnv = lt[x].gn; } uint temp = 0; int idx = tlgnv.getHashValue( 0 , p_num); int temp2 = r_p[idx+1]; for(int k=r_p[idx] ; k<temp2 ; k++){ if(ex.eval(tlgnv,prt[k].gn)){ temp++; } } count[x] = temp; } if(x == left-1){ count[x+1] = 0; } } __global__ void join( COLUMNDATA *lt, COLUMNDATA *prt, RESULT *jt, GComparisonExpression ex, int *r_p, uint *count, int p_num, int left ) { int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < left){ uint writeloc = count[x]; GNValue tlgnv; if(x == left-1){ tlgnv = lt[x].gn; }else{ tlgnv = lt[x].gn; } int idx = tlgnv.getHashValue( 0 , p_num); int temp2 = r_p[idx+1]; for(int k=r_p[idx] ; k<temp2 ; k ++){ if(ex.eval(tlgnv,prt[k].gn)){ jt[writeloc].lkey = lt[x].num; jt[writeloc].rkey = prt[k].num; writeloc++; } } } } }
2ade9de3754c64e5aa4ec1bac4e53ec4c62853e6.cu
#include <stdio.h> #include <stdint.h> #include <cuda.h> #include <sys/time.h> #include "GPUTUPLE.h" #include "GPUetc/common/GNValue.h" #include "GPUetc/expressions/Gcomparisonexpression.h" using namespace voltdb; extern "C" { __global__ void count( COLUMNDATA *lt, COLUMNDATA *prt, uint *count, GComparisonExpression ex, int *r_p, int p_num, int left ) { int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < left){ GNValue tlgnv; if(x == left-1){ tlgnv = lt[x].gn; }else{ tlgnv = lt[x].gn; } uint temp = 0; int idx = tlgnv.getHashValue( 0 , p_num); int temp2 = r_p[idx+1]; for(int k=r_p[idx] ; k<temp2 ; k++){ if(ex.eval(tlgnv,prt[k].gn)){ temp++; } } count[x] = temp; } if(x == left-1){ count[x+1] = 0; } } __global__ void join( COLUMNDATA *lt, COLUMNDATA *prt, RESULT *jt, GComparisonExpression ex, int *r_p, uint *count, int p_num, int left ) { int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < left){ uint writeloc = count[x]; GNValue tlgnv; if(x == left-1){ tlgnv = lt[x].gn; }else{ tlgnv = lt[x].gn; } int idx = tlgnv.getHashValue( 0 , p_num); int temp2 = r_p[idx+1]; for(int k=r_p[idx] ; k<temp2 ; k ++){ if(ex.eval(tlgnv,prt[k].gn)){ jt[writeloc].lkey = lt[x].num; jt[writeloc].rkey = prt[k].num; writeloc++; } } } } }
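The count/join kernels above implement the standard two-pass join-output scheme: count matches per left tuple, convert the counts into exclusive write offsets, then let join write each result row starting at count[x]. The conversion step is not in this file and presumably happens in the host code that launches the kernels; a minimal sketch of it, assuming the counts live in device memory and using Thrust (counts_to_offsets and d_count are placeholder names):

#include <thrust/device_ptr.h>
#include <thrust/scan.h>

// Convert per-tuple match counts into exclusive write offsets in place.
// d_count holds left + 1 unsigned ints on the device; count() already set the
// extra slot to 0, so after the scan count[left] is the total result size.
void counts_to_offsets(unsigned int* d_count, int left) {
  thrust::device_ptr<unsigned int> p(d_count);
  thrust::exclusive_scan(p, p + left + 1, p);
}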
5cb0de6c9295bf111ab59de1ffd7cca6761b4a08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stk/cuda/tools.hpp> #define PI 3.141592653589793238462643 __global__ void stk_cuKrnl_fourierTransform( const float2* i_srcPos, const float2* i_srcVal, int i_srcSize, const float2* i_dstPos, float2* o_dstVal, int i_dstSize, float i_dir, float i_normalization) { int i; int u = (blockIdx.x * blockDim.x + threadIdx.x); float ph; float2 res; res.x = 0; res.y = 0; if(u<i_dstSize) { for(i=0; i<i_srcSize; i++) { ph = i_srcPos[i].x*i_dstPos[u].x + i_srcPos[i].y*i_dstPos[u].y; ph *= i_dir*2.0*PI; res.x += i_srcVal[i].x * cos(ph) - i_srcVal[i].y * sin(ph); res.y += i_srcVal[i].y * cos(ph) + i_srcVal[i].x * sin(ph); } o_dstVal[u].x = res.x / i_normalization; o_dstVal[u].y = res.y / i_normalization; } } void stk_cuFourierTransform( const float* i_srcPos, const float* i_srcVal, int i_srcSize, const float* i_dstPos, float* o_dstVal, int i_dstSize, float i_dir, float i_normalization) { dim3 dimGrid, dimBlock; stk_cuGetSizes(dimGrid, dimBlock, i_srcSize, i_dstSize); /* INIT ***********************************************************/ const int srcByteSz = i_srcSize*sizeof(float2); const int destByteSz = i_dstSize*sizeof(float2); float2* dvcSrcPos; hipMalloc((void**) &dvcSrcPos, srcByteSz); float2* dvcSrcVal; hipMalloc((void**) &dvcSrcVal, srcByteSz); float2* dvcDestPos; hipMalloc((void**) &dvcDestPos, destByteSz); float2* dvcDestVal; hipMalloc((void**) &dvcDestVal, destByteSz); /* START ***********************************************************/ hipMemcpy(dvcSrcPos, i_srcPos, srcByteSz, hipMemcpyHostToDevice); hipMemcpy(dvcSrcVal, i_srcVal, srcByteSz, hipMemcpyHostToDevice); hipMemcpy(dvcDestPos, i_dstPos, destByteSz, hipMemcpyHostToDevice); hipLaunchKernelGGL(( stk_cuKrnl_fourierTransform) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dvcSrcPos, dvcSrcVal, i_srcSize, dvcDestPos, dvcDestVal, i_dstSize, i_dir, i_normalization); hipMemcpy(o_dstVal, dvcDestVal, destByteSz, hipMemcpyDeviceToHost); hipFree(dvcSrcPos); hipFree(dvcSrcVal); hipFree(dvcDestPos); hipFree(dvcDestVal); }
5cb0de6c9295bf111ab59de1ffd7cca6761b4a08.cu
#include <stk/cuda/tools.hpp> #define PI 3.141592653589793238462643 __global__ void stk_cuKrnl_fourierTransform( const float2* i_srcPos, const float2* i_srcVal, int i_srcSize, const float2* i_dstPos, float2* o_dstVal, int i_dstSize, float i_dir, float i_normalization) { int i; int u = (blockIdx.x * blockDim.x + threadIdx.x); float ph; float2 res; res.x = 0; res.y = 0; if(u<i_dstSize) { for(i=0; i<i_srcSize; i++) { ph = i_srcPos[i].x*i_dstPos[u].x + i_srcPos[i].y*i_dstPos[u].y; ph *= i_dir*2.0*PI; res.x += i_srcVal[i].x * cos(ph) - i_srcVal[i].y * sin(ph); res.y += i_srcVal[i].y * cos(ph) + i_srcVal[i].x * sin(ph); } o_dstVal[u].x = res.x / i_normalization; o_dstVal[u].y = res.y / i_normalization; } } void stk_cuFourierTransform( const float* i_srcPos, const float* i_srcVal, int i_srcSize, const float* i_dstPos, float* o_dstVal, int i_dstSize, float i_dir, float i_normalization) { dim3 dimGrid, dimBlock; stk_cuGetSizes(dimGrid, dimBlock, i_srcSize, i_dstSize); /* INIT ***********************************************************/ const int srcByteSz = i_srcSize*sizeof(float2); const int destByteSz = i_dstSize*sizeof(float2); float2* dvcSrcPos; cudaMalloc((void**) &dvcSrcPos, srcByteSz); float2* dvcSrcVal; cudaMalloc((void**) &dvcSrcVal, srcByteSz); float2* dvcDestPos; cudaMalloc((void**) &dvcDestPos, destByteSz); float2* dvcDestVal; cudaMalloc((void**) &dvcDestVal, destByteSz); /* START ***********************************************************/ cudaMemcpy(dvcSrcPos, i_srcPos, srcByteSz, cudaMemcpyHostToDevice); cudaMemcpy(dvcSrcVal, i_srcVal, srcByteSz, cudaMemcpyHostToDevice); cudaMemcpy(dvcDestPos, i_dstPos, destByteSz, cudaMemcpyHostToDevice); stk_cuKrnl_fourierTransform <<< dimGrid, dimBlock >>> ( dvcSrcPos, dvcSrcVal, i_srcSize, dvcDestPos, dvcDestVal, i_dstSize, i_dir, i_normalization); cudaMemcpy(o_dstVal, dvcDestVal, destByteSz, cudaMemcpyDeviceToHost); cudaFree(dvcSrcPos); cudaFree(dvcSrcVal); cudaFree(dvcDestPos); cudaFree(dvcDestVal); }
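This pair shows the purely mechanical rewrites hipify applies to host code: the hip_runtime.h include is added, each cuda* runtime call becomes its hip* counterpart (cudaMalloc/hipMalloc, cudaMemcpy/hipMemcpy, cudaFree/hipFree, with the matching hipMemcpyHostToDevice and hipMemcpyDeviceToHost enums), and the triple-chevron launch becomes hipLaunchKernelGGL with explicit shared-memory-size and stream arguments. A toy illustration of the launch mapping (scale_kernel is a made-up name, not from these files):

__global__ void scale_kernel(float* data, int n, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

// CUDA source:
//   scale_kernel<<<grid, block>>>(d_data, n, 2.0f);
// hipify output (the third and fourth arguments are dynamic shared-memory
// bytes and the stream; 0, 0 means none and the default stream):
//   hipLaunchKernelGGL(scale_kernel, dim3(grid), dim3(block), 0, 0, d_data, n, 2.0f);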
ccfe76fd4ed31377aeb0e5e865bee45da4eade55.hip
// !!! This is a file automatically generated by hipify!!! #include "lab3_cuda.h" #include <iostream> #include <cmath> #include <malloc.h> #include <fstream> #include <bits/stdc++.h> #include <hip/hip_runtime.h> #define pb push_back using namespace std; #define TOLERANCE 0.001 #define JACOBI_UPDATE_TOLERANCE 0.00001 #define FILENAME1 "testcase_1000_300" #define FILENAME2 "iris_stndardized" #define samples 150 #define features 4 #define BLOCK_SIZE 16 double **S; //Symmetric matrix (input) double *e; //eigenvalues double **E; //eigenvectors int *ind; bool *changed; int state; int N; void read_file(char* filename, int num_samples, int num_features, double** A) { ifstream ifile; ifile.open(filename, ios::in); double tmp; for (int i=0; i<num_samples; i++) { for (int j=0; j<num_features; j++){ ifile >> tmp; A[i][j] = tmp; } } ifile.close(); } __attribute__((optimize("-O3"))) double* mat_transpose(double* A, int Am, int An) { double *B; B = (double*)malloc(__SIZEOF_DOUBLE__*An*Am); // B = (double**)malloc(__SIZEOF_POINTER__*An); // for (int i=0; i<An; i++) // B[i] = (double*)malloc(__SIZEOF_DOUBLE__*Am); for (int i=0; i<Am; i++){ for (int j=0; j<An; j++){ B[j*Am + i] = A[i*An + j]; } } return B; } __attribute__((optimize("-O3"))) double** mat_mul(double** A, int Am, int An, double** B, int Bm, int Bn){ double **C; C = (double**)malloc(__SIZEOF_POINTER__*Am); for (int i=0; i<Am; i++) C[i] = (double*)malloc(__SIZEOF_DOUBLE__*Bn); for (int i=0; i<Am; i++){ for (int j=0; j<Bn; j++){ C[i][j] = 0; for (int k=0; k<An; k++){ C[i][j] += A[i][k] * B[k][j]; } } } return C; } __attribute__((optimize("-O3"))) double* new_mat_mul(double* A, int Am, int An, double* B, int Bm, int Bn){ double *C; C = (double*)malloc(__SIZEOF_DOUBLE__*Am*Bn); // C = (double**)malloc(__SIZEOF_POINTER__*Am); // for (int i=0; i<Am; i++) // C[i] = (double*)malloc(__SIZEOF_DOUBLE__*Bn); for (int i=0; i<Am; i++){ for (int j=0; j<Bn; j++){ C[i*Bn + j] = 0; for (int k=0; k<An; k++){ C[i*Bn + j] += A[i*An + k] * B[k*Bn + j]; } } } return C; } // dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); // dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); __global__ void gpu_matmul(double *a,double *b, double *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; double sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } __attribute__((optimize("-O3"))) int maxind(int k) { int m = k+1; for (int i = k+2; i < N; i++){ if (fabs(S[k][i]) > fabs(S[k][m])){ m = i; } } return m; } __attribute__((optimize("-O3"))) void update(int k, double t) { double ek_prev = e[k]; e[k] = ek_prev + t; if (e[k] < 0) e[k] = 0; if (changed[k] && (ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) { changed[k] = false; state = state - 1; } else if ((! 
changed[k]) && (ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) { changed[k] = true; state = state + 1; } } __attribute__((optimize("-O3"))) void rotate(int k, int l, int i, int j, double c, double s, bool eigenvectors){ // double** mat1; // double** mat2; // double** mat3; // double mat1_00; // double mat1_01; // double mat1_10; // double mat1_11; double mat2_00; double mat2_10; // double mat3_00; // double mat3_10; // mat1 = (double**)malloc(__SIZEOF_POINTER__*2); // mat1[0] = (double*)malloc(__SIZEOF_DOUBLE__*2); // mat1[1] = (double*)malloc(__SIZEOF_DOUBLE__*2); // mat1[0][0] = c; mat1[0][1] = -s; // mat1[1][0] = s; mat1[1][1] = c; // mat1_00 = c; mat1_01 = -s; // mat1_10 = s; mat1_11 = c; // mat2 = (double**)malloc(__SIZEOF_POINTER__*2); // mat2[0] = (double*)malloc(__SIZEOF_DOUBLE__*1); // mat2[1] = (double*)malloc(__SIZEOF_DOUBLE__*1); // if (eigenvectors){ // mat2[0][0] = E[i][k]; // mat2[1][0] = E[i][l]; // } // else { // mat2[0][0] = S[k][l]; // mat2[1][0] = S[i][j]; // } // if (eigenvectors){ // mat2_00 = E[i][k]; // mat2_10 = E[i][l]; // } // else { // mat2_00 = S[k][l]; // mat2_10 = S[i][j]; // } // mat3_00 = (c*mat2_00) - (s*mat2_10); // mat3_10 = (s*mat2_00) + (c*mat2_10); // mat3 = mat_mul(mat1, 2, 2, mat2, 2, 1); // if (eigenvectors){ // E[i][k] = mat3[0][0]; // E[i][l] = mat3[1][0]; // } // else{ // S[k][l] = mat3[0][0]; // S[i][j] = mat3[1][0]; // } // if (eigenvectors){ // E[i][k] = (c*mat2_00) - (s*mat2_10);; // E[i][l] = (s*mat2_00) + (c*mat2_10);; // } // else{ // S[k][l] = (c*mat2_00) - (s*mat2_10);; // S[i][j] = (s*mat2_00) + (c*mat2_10);; // } if (eigenvectors){ mat2_00 = E[i][k]; mat2_10 = E[i][l]; E[i][k] = (c*mat2_00) - (s*mat2_10);; E[i][l] = (s*mat2_00) + (c*mat2_10);; } else{ mat2_00 = S[k][l]; mat2_10 = S[i][j]; S[k][l] = (c*mat2_00) - (s*mat2_10);; S[i][j] = (s*mat2_00) + (c*mat2_10);; } // free(mat1[0]); // free(mat1[1]); // free(mat1); // free(mat2[0]); // free(mat2[1]); // free(mat2); // free(mat3[0]); // free(mat3[1]); // free(mat3); } void print_matrix(double** A, int Am, int An) { cout << "["; for (int i=0; i<Am; i++){ if (i>0) cout<<" "; cout<<"["; for (int j=0; j<An-1; j++){ cout << A[i][j] << ", "; } if (i < Am-1) cout << A[i][An-1] << "]" << endl; } cout << A[Am-1][An-1] << "]]" << endl; } void print_vector(double* A, int An) { cout << "["; for(int i=0; i<An-1; i++) cout << A[i] << ","; cout << A[An-1] << "]" << endl; } __attribute__((optimize("-O3"))) void init_jacobi() { E = (double**)malloc(__SIZEOF_POINTER__*N); for (int i=0; i<N; i++){ E[i] = (double*)malloc(__SIZEOF_DOUBLE__*N); for (int j=0; j<N; j++){ E[i][j] = 0; } E[i][i] = 1; } state = N; e = (double*)malloc(__SIZEOF_DOUBLE__*N); ind = (int*)malloc(__SIZEOF_INT__*N); changed = (bool*)malloc(sizeof(bool)*N); for (int k=0; k<N; k++){ ind[k] = maxind(k); e[k] = S[k][k]; changed[k] = true; } } __attribute__((optimize("-O3"))) void Jacobi(double **input_matrix, int n, double **eigenvalues, double ***eigenvectors) { N = n; S = input_matrix; init_jacobi(); int count=0; // float totaltime=0; while(state != 0){ int m = 0; count++; // float computation_time1; // hipEvent_t start1, stop1; // hipEventCreate(&start1); // hipEventCreate(&stop1); // hipEventRecord(start1); for (int k=1; k<N-1; k++){ if (fabs(S[k][ind[k]]) > fabs(S[m][ind[m]])){ m = k; } } // hipEventRecord(stop1); // hipEventSynchronize(stop1); // hipEventElapsedTime(&computation_time1, start1, stop1); // // cout << "time for 1 loop: " << computation_time1 << endl; // totaltime+=computation_time1; int k = m; int l = ind[m]; double p = 
S[k][l]; double y = (e[l] - e[k]) / 2.0; double d = fabs(y) + sqrt(p*p + y*y); double r = sqrt(p*p + d*d); double c = d / r; double s = p / r; double t = (p*p) / d; if (y < 0.0) { s = -s; t = -t; } S[k][l] = 0.0; update(k, -t); update(l, t); for (int i=0; i<k; i++){ // rotate(i, k, i, l, c, s, false); double mat2_00 = S[i][k]; double mat2_10 = S[i][l]; S[i][k] = (c*mat2_00) - (s*mat2_10);; S[i][l] = (s*mat2_00) + (c*mat2_10);; } for (int i=k+1; i<l; i++){ // rotate(k, i, i, l, c, s, false); double mat2_00 = S[k][i]; double mat2_10 = S[i][l]; S[k][i] = (c*mat2_00) - (s*mat2_10);; S[i][l] = (s*mat2_00) + (c*mat2_10);; } for (int i=l+1; i<N; i++){ // rotate(k, i, l, i, c, s, false); double mat2_00 = S[k][i]; double mat2_10 = S[l][i]; S[k][i] = (c*mat2_00) - (s*mat2_10);; S[l][i] = (s*mat2_00) + (c*mat2_10);; } for (int i=0; i<N; i++){ // rotate(k, l, i, i, c, s, true); double mat2_00 = E[i][k]; double mat2_10 = E[i][l]; E[i][k] = (c*mat2_00) - (s*mat2_10);; E[i][l] = (s*mat2_00) + (c*mat2_10);; } // for (int i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); } // for (int i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); } // for (int i=l+1; i<N; i++) { rotate(k, i, l, i, c, s, false); } // for (int i=0; i<N; i++){ // rotate(k, l, i, i, c, s, true); // } ind[k] = maxind(k); ind[l] = maxind(l); } *eigenvalues = e; *eigenvectors = E; // cout << "Total time for loop: "<<totaltime << endl; cout << "Total iterations: "<<count << endl; // cout << "Changednowagain" << endl; } // int main(){ // double **D, **D_T; // double **prod, *eigenvalues, **eigenvectors; // D = (double**)malloc(sizeof(double*)*samples); // for (int i=0; i<samples; i++) // D[i] = (double*)malloc(sizeof(double)*features); // read_file((char*)FILENAME1, samples, features, D); // D_T = mat_transpose(D, samples, features); // prod = mat_mul(D_T, features, samples, D, samples, features); // Jacobi(prod, features, &eigenvalues, &eigenvectors); // cout << "\neigenvalues:" << endl; // print_vector(eigenvalues, features); // cout << "\neigenvectors:" << endl; // print_matrix(eigenvectors, features, features); // return 0; // } // /* // ***************************************************** // TODO -- You must implement this function // ***************************************************** // */ __attribute__((optimize("-O3"))) void SVD_and_PCA (int M, int N, double* D, double** U, double** SIGMA, double** V_T, double** D_HAT, int *K, int retention) { // write your code here double *d; double *d_t; double **product, *eigenvalues, **eigenvectors; // double **v; d = (double*)malloc(sizeof(double*)*M*N); // // for (int i=0; i<M; i++) // d[i] = (double*)malloc(sizeof(double)*N); for(int i=0;i<M;i++){ for(int j=0;j<N;j++) d[i*N+j] = D[i*N+j]; } d_t = mat_transpose(d, M, N); // for(int i=0;i<N;i++){ // for(int j=0;j<M;j++){ // printf("%f ", (d_t)[i*M+j]); // } // printf("\n"); // } // product = new_mat_mul(d_t, N, M, d, M, N); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// double *product1; product1 = (double*)malloc(sizeof(double)*N*N); double *gpu_a, *gpu_b, *gpu_c; hipMalloc((void **) &gpu_a, sizeof(double)*N*M); hipMalloc((void **) &gpu_b, sizeof(double)*M*N); hipMalloc((void **) &gpu_c, sizeof(double)*N*N); // copying matrix d_t and d from host to device memory hipMemcpy(gpu_a, d_t, sizeof(double)*N*M, hipMemcpyHostToDevice); hipMemcpy(gpu_b, d, sizeof(double)*M*N, hipMemcpyHostToDevice); unsigned int g_rows = (N 
+ BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int g_cols = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(g_cols, g_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( gpu_matmul), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_a, gpu_b, gpu_c, N, M, N); hipMemcpy(product1, gpu_c, sizeof(double)*N*N, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(gpu_a); hipFree(gpu_b); hipFree(gpu_c); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product1)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// // size N*N product = (double**)malloc(sizeof(double*)*N); for (int i=0; i<N; i++) product[i] = (double*)malloc(sizeof(double)*N); for(int i=0; i<N; i++){ for(int j=0; j<N; j++) product[i][j]=product1[i*N + j]; } // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product)[i][j]); // } // printf("\n"); // } float computation_time1; hipEvent_t start1, stop1; hipEventCreate(&start1); hipEventCreate(&stop1); hipEventRecord(start1); Jacobi(product, N, &eigenvalues, &eigenvectors); hipEventRecord(stop1); hipEventSynchronize(stop1); hipEventElapsedTime(&computation_time1, start1, stop1); printf("Time taken for Jacobi: %f\n", computation_time1); // for(int i=0;i<N;i++) printf("%f\n", eigenvalues[i]); vector<double> eigenvals; for(int i=0; i<N; i++) eigenvals.pb(eigenvalues[i]); vector<pair<double, int> > eigenv_index; for(int i=0; i<eigenvals.size(); i++){ eigenv_index.pb(make_pair(eigenvalues[i],i)); } sort(eigenv_index.begin(), eigenv_index.end()); int e = eigenv_index.size()-1; for(int i=0;i<N;i++){ (*SIGMA)[i] = sqrt(eigenv_index[e].first); e--; } // for(int i=0;i<N;i++) printf("%f\n", (*SIGMA)[i]); double *u = (double*)malloc(sizeof(double)*N*N); // double **u = (double**)malloc(sizeof(double*)*N); // for (int i=0; i<N; i++) // u[i] = (double*)malloc(sizeof(double)*N); e = eigenv_index.size()-1; for(int j=0;j<N;j++){ int index = eigenv_index[e].second; for(int i=0;i<N;i++){ u[i*N + j] = eigenvectors[i][index]; } e--; } for(int j=0;j<N;j++){ for(int i=0;i<N;i++){ (*U)[i*N+j] = u[i*N + j]; } } // for(int j=0;j<N;j++){ // for(int i=0;i<N;i++){ // printf("%f ", (*U)[i*N+j]); // } // printf("\n"); // } // size N*M double *sigma_invT = (double*)malloc(sizeof(double*)*N*M); // double **sigma_invT = (double**)malloc(sizeof(double*)*N); // for (int i=0; i<N; i++) // sigma_invT[i] = (double*)malloc(sizeof(double)*M); for(int i=0; i<N; i++){ for(int j=0; j<M; j++) sigma_invT[i*M + j]=0; } e = eigenv_index.size()-1; for(int i=0; i<N;i++){ if(eigenv_index[e].first<1e-5){ sigma_invT[i*M + i]= 0; } else{ sigma_invT[i*M + i]= 1/sqrt(eigenv_index[e].first); } e--; } // double **temp = mat_mul(d, M, N, u, N, N); // double **v = mat_mul(temp, M, N, sigma_invT, N, M); // double **v_t = mat_transpose(v, M, M); /////////////////////////////////////////////////////////////// double *temp; temp = (double*)malloc(sizeof(double)*M*N); double *gpu_a1, *gpu_b1, *gpu_c1; hipMalloc((void **) &gpu_a1, sizeof(double)*M*N); hipMalloc((void **) &gpu_b1, sizeof(double)*N*N); hipMalloc((void **) &gpu_c1, sizeof(double)*M*N); // copying matrix d_t and d from host to device memory hipMemcpy(gpu_a1, d, sizeof(double)*M*N, hipMemcpyHostToDevice); hipMemcpy(gpu_b1, u, sizeof(double)*N*N, hipMemcpyHostToDevice); unsigned int g_rows1 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int g_cols1 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid1(g_cols1, g_rows1); dim3 dimBlock1(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( gpu_matmul), dim3(dimGrid1), 
dim3(dimBlock1), 0, 0, gpu_a1, gpu_b1, gpu_c1, M, N, N); hipMemcpy(temp, gpu_c1, sizeof(double)*M*N, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(gpu_a1); hipFree(gpu_b1); hipFree(gpu_c1); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product1)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// double *v; v = (double*)malloc(sizeof(double)*M*M); double *gpu_a2, *gpu_b2, *gpu_c2; hipMalloc((void **) &gpu_a2, sizeof(double)*M*N); hipMalloc((void **) &gpu_b2, sizeof(double)*N*M); hipMalloc((void **) &gpu_c2, sizeof(double)*M*M); // copying matrix d_t and d from host to device memory hipMemcpy(gpu_a2, temp, sizeof(double)*M*N, hipMemcpyHostToDevice); hipMemcpy(gpu_b2, sigma_invT, sizeof(double)*N*M, hipMemcpyHostToDevice); unsigned int g_rows2 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int g_cols2 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid2(g_cols2, g_rows2); dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( gpu_matmul), dim3(dimGrid2), dim3(dimBlock2), 0, 0, gpu_a2, gpu_b2, gpu_c2, M, N, M); hipMemcpy(v, gpu_c2, sizeof(double)*M*M, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(gpu_a2); hipFree(gpu_b2); hipFree(gpu_c2); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product1)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// // for(int i=0; i<M; i++){ // for(int j=0; j<M; j++) printf("%f ", v_t[i][j]); // printf("\n"); // } // for(int i=0; i<M; i++){ // for(int j=0; j<M; j++) (*V_T)[i*M+j] = v_t[i][j]; // } for(int i=0; i<M; i++){ for(int j=0; j<M; j++) (*V_T)[i*M+j] = v[j*M + i]; } // for(int i=0; i<M; i++){ // for(int j=0; j<M; j++) printf("%f ", V_T[i][j]); // printf("\n"); // } double num=0; int k=0; double sigmasqsum=0; for(k=0; k<N; k++){ sigmasqsum += (*SIGMA)[k]*(*SIGMA)[k]; } for(k=0; k<N; k++){ num += ((*SIGMA)[k]*(*SIGMA)[k])/sigmasqsum; if(num >= retention/100.0){ break; } } *K = k+1; // double **newU; // double **newU = (double**)malloc(sizeof(double*)*N*(k+1)); double *newU = (double*)malloc(sizeof(double)*N*(k+1)); // double **newU = (double**)malloc(sizeof(double*)*N); // for (int i=0; i<N; i++) // newU[i] = (double*)malloc(sizeof(double)*(k+1)); for(int i=0; i<N; i++){ for(int j=0;j<k+1;j++){ newU[i*(k+1) + j] = (u)[i*N + j]; } } // for(int i=0; i<N; i++){ // for(int j=0; j<(k+1); j++) printf("%f ", newU[i][j]); // printf("\n"); // } // double **d_hat = (double**)malloc(sizeof(double*)*M); // for (int i=0; i<(k+1); i++) // d_hat[i] = (double*)malloc(sizeof(double)*(k+1)); // d_hat = mat_mul(d, M, N, newU, N, (k+1)); /////////////////////////////////////////////////////////////// double *d_hat = (double*)malloc(sizeof(double)*M*(k+1)); // v = (double*)malloc(sizeof(double)*M*M); double *gpu_a3, *gpu_b3, *gpu_c3; hipMalloc((void **) &gpu_a3, sizeof(double)*M*N); hipMalloc((void **) &gpu_b3, sizeof(double)*N*(k+1)); hipMalloc((void **) &gpu_c3, sizeof(double)*M*(k+1)); // copying matrix d_t and d from host to device memory hipMemcpy(gpu_a3, d, sizeof(double)*M*N, hipMemcpyHostToDevice); hipMemcpy(gpu_b3, newU, sizeof(double)*N*(k+1), hipMemcpyHostToDevice); unsigned int g_rows3 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int g_cols3 = ((k+1) + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid3(g_cols3, g_rows3); dim3 dimBlock3(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( gpu_matmul), dim3(dimGrid3), dim3(dimBlock3), 0, 0, gpu_a3, gpu_b3, 
gpu_c3, M, N, (k+1)); hipMemcpy(d_hat, gpu_c3, sizeof(double)*M*(k+1), hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(gpu_a3); hipFree(gpu_b3); hipFree(gpu_c3); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product1)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// *D_HAT = (double*) malloc(sizeof(double) * M*(k+1)); for(int i=0; i<M; i++){ for(int j=0;j<k+1;j++){ (*D_HAT)[i*(k+1)+j] = d_hat[i*(k+1) + j]; } } }
ccfe76fd4ed31377aeb0e5e865bee45da4eade55.cu
#include "lab3_cuda.h" #include <iostream> #include <cmath> #include <malloc.h> #include <fstream> #include <bits/stdc++.h> #include <cuda.h> #define pb push_back using namespace std; #define TOLERANCE 0.001 #define JACOBI_UPDATE_TOLERANCE 0.00001 #define FILENAME1 "testcase_1000_300" #define FILENAME2 "iris_stndardized" #define samples 150 #define features 4 #define BLOCK_SIZE 16 double **S; //Symmetric matrix (input) double *e; //eigenvalues double **E; //eigenvectors int *ind; bool *changed; int state; int N; void read_file(char* filename, int num_samples, int num_features, double** A) { ifstream ifile; ifile.open(filename, ios::in); double tmp; for (int i=0; i<num_samples; i++) { for (int j=0; j<num_features; j++){ ifile >> tmp; A[i][j] = tmp; } } ifile.close(); } __attribute__((optimize("-O3"))) double* mat_transpose(double* A, int Am, int An) { double *B; B = (double*)malloc(__SIZEOF_DOUBLE__*An*Am); // B = (double**)malloc(__SIZEOF_POINTER__*An); // for (int i=0; i<An; i++) // B[i] = (double*)malloc(__SIZEOF_DOUBLE__*Am); for (int i=0; i<Am; i++){ for (int j=0; j<An; j++){ B[j*Am + i] = A[i*An + j]; } } return B; } __attribute__((optimize("-O3"))) double** mat_mul(double** A, int Am, int An, double** B, int Bm, int Bn){ double **C; C = (double**)malloc(__SIZEOF_POINTER__*Am); for (int i=0; i<Am; i++) C[i] = (double*)malloc(__SIZEOF_DOUBLE__*Bn); for (int i=0; i<Am; i++){ for (int j=0; j<Bn; j++){ C[i][j] = 0; for (int k=0; k<An; k++){ C[i][j] += A[i][k] * B[k][j]; } } } return C; } __attribute__((optimize("-O3"))) double* new_mat_mul(double* A, int Am, int An, double* B, int Bm, int Bn){ double *C; C = (double*)malloc(__SIZEOF_DOUBLE__*Am*Bn); // C = (double**)malloc(__SIZEOF_POINTER__*Am); // for (int i=0; i<Am; i++) // C[i] = (double*)malloc(__SIZEOF_DOUBLE__*Bn); for (int i=0; i<Am; i++){ for (int j=0; j<Bn; j++){ C[i*Bn + j] = 0; for (int k=0; k<An; k++){ C[i*Bn + j] += A[i*An + k] * B[k*Bn + j]; } } } return C; } // dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); // dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); __global__ void gpu_matmul(double *a,double *b, double *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; double sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } __attribute__((optimize("-O3"))) int maxind(int k) { int m = k+1; for (int i = k+2; i < N; i++){ if (fabs(S[k][i]) > fabs(S[k][m])){ m = i; } } return m; } __attribute__((optimize("-O3"))) void update(int k, double t) { double ek_prev = e[k]; e[k] = ek_prev + t; if (e[k] < 0) e[k] = 0; if (changed[k] && (ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) { changed[k] = false; state = state - 1; } else if ((! 
changed[k]) && (ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) { changed[k] = true; state = state + 1; } } __attribute__((optimize("-O3"))) void rotate(int k, int l, int i, int j, double c, double s, bool eigenvectors){ // double** mat1; // double** mat2; // double** mat3; // double mat1_00; // double mat1_01; // double mat1_10; // double mat1_11; double mat2_00; double mat2_10; // double mat3_00; // double mat3_10; // mat1 = (double**)malloc(__SIZEOF_POINTER__*2); // mat1[0] = (double*)malloc(__SIZEOF_DOUBLE__*2); // mat1[1] = (double*)malloc(__SIZEOF_DOUBLE__*2); // mat1[0][0] = c; mat1[0][1] = -s; // mat1[1][0] = s; mat1[1][1] = c; // mat1_00 = c; mat1_01 = -s; // mat1_10 = s; mat1_11 = c; // mat2 = (double**)malloc(__SIZEOF_POINTER__*2); // mat2[0] = (double*)malloc(__SIZEOF_DOUBLE__*1); // mat2[1] = (double*)malloc(__SIZEOF_DOUBLE__*1); // if (eigenvectors){ // mat2[0][0] = E[i][k]; // mat2[1][0] = E[i][l]; // } // else { // mat2[0][0] = S[k][l]; // mat2[1][0] = S[i][j]; // } // if (eigenvectors){ // mat2_00 = E[i][k]; // mat2_10 = E[i][l]; // } // else { // mat2_00 = S[k][l]; // mat2_10 = S[i][j]; // } // mat3_00 = (c*mat2_00) - (s*mat2_10); // mat3_10 = (s*mat2_00) + (c*mat2_10); // mat3 = mat_mul(mat1, 2, 2, mat2, 2, 1); // if (eigenvectors){ // E[i][k] = mat3[0][0]; // E[i][l] = mat3[1][0]; // } // else{ // S[k][l] = mat3[0][0]; // S[i][j] = mat3[1][0]; // } // if (eigenvectors){ // E[i][k] = (c*mat2_00) - (s*mat2_10);; // E[i][l] = (s*mat2_00) + (c*mat2_10);; // } // else{ // S[k][l] = (c*mat2_00) - (s*mat2_10);; // S[i][j] = (s*mat2_00) + (c*mat2_10);; // } if (eigenvectors){ mat2_00 = E[i][k]; mat2_10 = E[i][l]; E[i][k] = (c*mat2_00) - (s*mat2_10);; E[i][l] = (s*mat2_00) + (c*mat2_10);; } else{ mat2_00 = S[k][l]; mat2_10 = S[i][j]; S[k][l] = (c*mat2_00) - (s*mat2_10);; S[i][j] = (s*mat2_00) + (c*mat2_10);; } // free(mat1[0]); // free(mat1[1]); // free(mat1); // free(mat2[0]); // free(mat2[1]); // free(mat2); // free(mat3[0]); // free(mat3[1]); // free(mat3); } void print_matrix(double** A, int Am, int An) { cout << "["; for (int i=0; i<Am; i++){ if (i>0) cout<<" "; cout<<"["; for (int j=0; j<An-1; j++){ cout << A[i][j] << ", "; } if (i < Am-1) cout << A[i][An-1] << "]" << endl; } cout << A[Am-1][An-1] << "]]" << endl; } void print_vector(double* A, int An) { cout << "["; for(int i=0; i<An-1; i++) cout << A[i] << ","; cout << A[An-1] << "]" << endl; } __attribute__((optimize("-O3"))) void init_jacobi() { E = (double**)malloc(__SIZEOF_POINTER__*N); for (int i=0; i<N; i++){ E[i] = (double*)malloc(__SIZEOF_DOUBLE__*N); for (int j=0; j<N; j++){ E[i][j] = 0; } E[i][i] = 1; } state = N; e = (double*)malloc(__SIZEOF_DOUBLE__*N); ind = (int*)malloc(__SIZEOF_INT__*N); changed = (bool*)malloc(sizeof(bool)*N); for (int k=0; k<N; k++){ ind[k] = maxind(k); e[k] = S[k][k]; changed[k] = true; } } __attribute__((optimize("-O3"))) void Jacobi(double **input_matrix, int n, double **eigenvalues, double ***eigenvectors) { N = n; S = input_matrix; init_jacobi(); int count=0; // float totaltime=0; while(state != 0){ int m = 0; count++; // float computation_time1; // cudaEvent_t start1, stop1; // cudaEventCreate(&start1); // cudaEventCreate(&stop1); // cudaEventRecord(start1); for (int k=1; k<N-1; k++){ if (fabs(S[k][ind[k]]) > fabs(S[m][ind[m]])){ m = k; } } // cudaEventRecord(stop1); // cudaEventSynchronize(stop1); // cudaEventElapsedTime(&computation_time1, start1, stop1); // // cout << "time for 1 loop: " << computation_time1 << endl; // totaltime+=computation_time1; int k = m; int l = ind[m]; double 
p = S[k][l]; double y = (e[l] - e[k]) / 2.0; double d = fabs(y) + sqrt(p*p + y*y); double r = sqrt(p*p + d*d); double c = d / r; double s = p / r; double t = (p*p) / d; if (y < 0.0) { s = -s; t = -t; } S[k][l] = 0.0; update(k, -t); update(l, t); for (int i=0; i<k; i++){ // rotate(i, k, i, l, c, s, false); double mat2_00 = S[i][k]; double mat2_10 = S[i][l]; S[i][k] = (c*mat2_00) - (s*mat2_10);; S[i][l] = (s*mat2_00) + (c*mat2_10);; } for (int i=k+1; i<l; i++){ // rotate(k, i, i, l, c, s, false); double mat2_00 = S[k][i]; double mat2_10 = S[i][l]; S[k][i] = (c*mat2_00) - (s*mat2_10);; S[i][l] = (s*mat2_00) + (c*mat2_10);; } for (int i=l+1; i<N; i++){ // rotate(k, i, l, i, c, s, false); double mat2_00 = S[k][i]; double mat2_10 = S[l][i]; S[k][i] = (c*mat2_00) - (s*mat2_10);; S[l][i] = (s*mat2_00) + (c*mat2_10);; } for (int i=0; i<N; i++){ // rotate(k, l, i, i, c, s, true); double mat2_00 = E[i][k]; double mat2_10 = E[i][l]; E[i][k] = (c*mat2_00) - (s*mat2_10);; E[i][l] = (s*mat2_00) + (c*mat2_10);; } // for (int i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); } // for (int i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); } // for (int i=l+1; i<N; i++) { rotate(k, i, l, i, c, s, false); } // for (int i=0; i<N; i++){ // rotate(k, l, i, i, c, s, true); // } ind[k] = maxind(k); ind[l] = maxind(l); } *eigenvalues = e; *eigenvectors = E; // cout << "Total time for loop: "<<totaltime << endl; cout << "Total iterations: "<<count << endl; // cout << "Changednowagain" << endl; } // int main(){ // double **D, **D_T; // double **prod, *eigenvalues, **eigenvectors; // D = (double**)malloc(sizeof(double*)*samples); // for (int i=0; i<samples; i++) // D[i] = (double*)malloc(sizeof(double)*features); // read_file((char*)FILENAME1, samples, features, D); // D_T = mat_transpose(D, samples, features); // prod = mat_mul(D_T, features, samples, D, samples, features); // Jacobi(prod, features, &eigenvalues, &eigenvectors); // cout << "\neigenvalues:" << endl; // print_vector(eigenvalues, features); // cout << "\neigenvectors:" << endl; // print_matrix(eigenvectors, features, features); // return 0; // } // /* // ***************************************************** // TODO -- You must implement this function // ***************************************************** // */ __attribute__((optimize("-O3"))) void SVD_and_PCA (int M, int N, double* D, double** U, double** SIGMA, double** V_T, double** D_HAT, int *K, int retention) { // write your code here double *d; double *d_t; double **product, *eigenvalues, **eigenvectors; // double **v; d = (double*)malloc(sizeof(double*)*M*N); // // for (int i=0; i<M; i++) // d[i] = (double*)malloc(sizeof(double)*N); for(int i=0;i<M;i++){ for(int j=0;j<N;j++) d[i*N+j] = D[i*N+j]; } d_t = mat_transpose(d, M, N); // for(int i=0;i<N;i++){ // for(int j=0;j<M;j++){ // printf("%f ", (d_t)[i*M+j]); // } // printf("\n"); // } // product = new_mat_mul(d_t, N, M, d, M, N); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// double *product1; product1 = (double*)malloc(sizeof(double)*N*N); double *gpu_a, *gpu_b, *gpu_c; cudaMalloc((void **) &gpu_a, sizeof(double)*N*M); cudaMalloc((void **) &gpu_b, sizeof(double)*M*N); cudaMalloc((void **) &gpu_c, sizeof(double)*N*N); // copying matrix d_t and d from host to device memory cudaMemcpy(gpu_a, d_t, sizeof(double)*N*M, cudaMemcpyHostToDevice); cudaMemcpy(gpu_b, d, sizeof(double)*M*N, cudaMemcpyHostToDevice); unsigned int 
g_rows = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int g_cols = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(g_cols, g_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); gpu_matmul<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c, N, M, N); cudaMemcpy(product1, gpu_c, sizeof(double)*N*N, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); cudaFree(gpu_a); cudaFree(gpu_b); cudaFree(gpu_c); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product1)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// // size N*N product = (double**)malloc(sizeof(double*)*N); for (int i=0; i<N; i++) product[i] = (double*)malloc(sizeof(double)*N); for(int i=0; i<N; i++){ for(int j=0; j<N; j++) product[i][j]=product1[i*N + j]; } // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product)[i][j]); // } // printf("\n"); // } float computation_time1; cudaEvent_t start1, stop1; cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventRecord(start1); Jacobi(product, N, &eigenvalues, &eigenvectors); cudaEventRecord(stop1); cudaEventSynchronize(stop1); cudaEventElapsedTime(&computation_time1, start1, stop1); printf("Time taken for Jacobi: %f\n", computation_time1); // for(int i=0;i<N;i++) printf("%f\n", eigenvalues[i]); vector<double> eigenvals; for(int i=0; i<N; i++) eigenvals.pb(eigenvalues[i]); vector<pair<double, int> > eigenv_index; for(int i=0; i<eigenvals.size(); i++){ eigenv_index.pb(make_pair(eigenvalues[i],i)); } sort(eigenv_index.begin(), eigenv_index.end()); int e = eigenv_index.size()-1; for(int i=0;i<N;i++){ (*SIGMA)[i] = sqrt(eigenv_index[e].first); e--; } // for(int i=0;i<N;i++) printf("%f\n", (*SIGMA)[i]); double *u = (double*)malloc(sizeof(double)*N*N); // double **u = (double**)malloc(sizeof(double*)*N); // for (int i=0; i<N; i++) // u[i] = (double*)malloc(sizeof(double)*N); e = eigenv_index.size()-1; for(int j=0;j<N;j++){ int index = eigenv_index[e].second; for(int i=0;i<N;i++){ u[i*N + j] = eigenvectors[i][index]; } e--; } for(int j=0;j<N;j++){ for(int i=0;i<N;i++){ (*U)[i*N+j] = u[i*N + j]; } } // for(int j=0;j<N;j++){ // for(int i=0;i<N;i++){ // printf("%f ", (*U)[i*N+j]); // } // printf("\n"); // } // size N*M double *sigma_invT = (double*)malloc(sizeof(double*)*N*M); // double **sigma_invT = (double**)malloc(sizeof(double*)*N); // for (int i=0; i<N; i++) // sigma_invT[i] = (double*)malloc(sizeof(double)*M); for(int i=0; i<N; i++){ for(int j=0; j<M; j++) sigma_invT[i*M + j]=0; } e = eigenv_index.size()-1; for(int i=0; i<N;i++){ if(eigenv_index[e].first<1e-5){ sigma_invT[i*M + i]= 0; } else{ sigma_invT[i*M + i]= 1/sqrt(eigenv_index[e].first); } e--; } // double **temp = mat_mul(d, M, N, u, N, N); // double **v = mat_mul(temp, M, N, sigma_invT, N, M); // double **v_t = mat_transpose(v, M, M); /////////////////////////////////////////////////////////////// double *temp; temp = (double*)malloc(sizeof(double)*M*N); double *gpu_a1, *gpu_b1, *gpu_c1; cudaMalloc((void **) &gpu_a1, sizeof(double)*M*N); cudaMalloc((void **) &gpu_b1, sizeof(double)*N*N); cudaMalloc((void **) &gpu_c1, sizeof(double)*M*N); // copying matrix d_t and d from host to device memory cudaMemcpy(gpu_a1, d, sizeof(double)*M*N, cudaMemcpyHostToDevice); cudaMemcpy(gpu_b1, u, sizeof(double)*N*N, cudaMemcpyHostToDevice); unsigned int g_rows1 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int g_cols1 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid1(g_cols1, g_rows1); dim3 dimBlock1(BLOCK_SIZE, BLOCK_SIZE); gpu_matmul<<<dimGrid1, dimBlock1>>>(gpu_a1, gpu_b1, 
gpu_c1, M, N, N); cudaMemcpy(temp, gpu_c1, sizeof(double)*M*N, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); cudaFree(gpu_a1); cudaFree(gpu_b1); cudaFree(gpu_c1); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product1)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// double *v; v = (double*)malloc(sizeof(double)*M*M); double *gpu_a2, *gpu_b2, *gpu_c2; cudaMalloc((void **) &gpu_a2, sizeof(double)*M*N); cudaMalloc((void **) &gpu_b2, sizeof(double)*N*M); cudaMalloc((void **) &gpu_c2, sizeof(double)*M*M); // copying matrix d_t and d from host to device memory cudaMemcpy(gpu_a2, temp, sizeof(double)*M*N, cudaMemcpyHostToDevice); cudaMemcpy(gpu_b2, sigma_invT, sizeof(double)*N*M, cudaMemcpyHostToDevice); unsigned int g_rows2 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int g_cols2 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid2(g_cols2, g_rows2); dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE); gpu_matmul<<<dimGrid2, dimBlock2>>>(gpu_a2, gpu_b2, gpu_c2, M, N, M); cudaMemcpy(v, gpu_c2, sizeof(double)*M*M, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); cudaFree(gpu_a2); cudaFree(gpu_b2); cudaFree(gpu_c2); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product1)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// // for(int i=0; i<M; i++){ // for(int j=0; j<M; j++) printf("%f ", v_t[i][j]); // printf("\n"); // } // for(int i=0; i<M; i++){ // for(int j=0; j<M; j++) (*V_T)[i*M+j] = v_t[i][j]; // } for(int i=0; i<M; i++){ for(int j=0; j<M; j++) (*V_T)[i*M+j] = v[j*M + i]; } // for(int i=0; i<M; i++){ // for(int j=0; j<M; j++) printf("%f ", V_T[i][j]); // printf("\n"); // } double num=0; int k=0; double sigmasqsum=0; for(k=0; k<N; k++){ sigmasqsum += (*SIGMA)[k]*(*SIGMA)[k]; } for(k=0; k<N; k++){ num += ((*SIGMA)[k]*(*SIGMA)[k])/sigmasqsum; if(num >= retention/100.0){ break; } } *K = k+1; // double **newU; // double **newU = (double**)malloc(sizeof(double*)*N*(k+1)); double *newU = (double*)malloc(sizeof(double)*N*(k+1)); // double **newU = (double**)malloc(sizeof(double*)*N); // for (int i=0; i<N; i++) // newU[i] = (double*)malloc(sizeof(double)*(k+1)); for(int i=0; i<N; i++){ for(int j=0;j<k+1;j++){ newU[i*(k+1) + j] = (u)[i*N + j]; } } // for(int i=0; i<N; i++){ // for(int j=0; j<(k+1); j++) printf("%f ", newU[i][j]); // printf("\n"); // } // double **d_hat = (double**)malloc(sizeof(double*)*M); // for (int i=0; i<(k+1); i++) // d_hat[i] = (double*)malloc(sizeof(double)*(k+1)); // d_hat = mat_mul(d, M, N, newU, N, (k+1)); /////////////////////////////////////////////////////////////// double *d_hat = (double*)malloc(sizeof(double)*M*(k+1)); // v = (double*)malloc(sizeof(double)*M*M); double *gpu_a3, *gpu_b3, *gpu_c3; cudaMalloc((void **) &gpu_a3, sizeof(double)*M*N); cudaMalloc((void **) &gpu_b3, sizeof(double)*N*(k+1)); cudaMalloc((void **) &gpu_c3, sizeof(double)*M*(k+1)); // copying matrix d_t and d from host to device memory cudaMemcpy(gpu_a3, d, sizeof(double)*M*N, cudaMemcpyHostToDevice); cudaMemcpy(gpu_b3, newU, sizeof(double)*N*(k+1), cudaMemcpyHostToDevice); unsigned int g_rows3 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int g_cols3 = ((k+1) + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid3(g_cols3, g_rows3); dim3 dimBlock3(BLOCK_SIZE, BLOCK_SIZE); gpu_matmul<<<dimGrid3, dimBlock3>>>(gpu_a3, gpu_b3, gpu_c3, M, N, (k+1)); cudaMemcpy(d_hat, gpu_c3, sizeof(double)*M*(k+1), 
cudaMemcpyDeviceToHost); cudaThreadSynchronize(); cudaFree(gpu_a3); cudaFree(gpu_b3); cudaFree(gpu_c3); // for(int i=0;i<N;i++){ // for(int j=0;j<N;j++){ // printf("%f ", (product1)[i*N+j]); // } // printf("\n"); // } /////////////////////////////////////////////////////////////// *D_HAT = (double*) malloc(sizeof(double) * M*(k+1)); for(int i=0; i<M; i++){ for(int j=0;j<k+1;j++){ (*D_HAT)[i*(k+1)+j] = d_hat[i*(k+1) + j]; } } }
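In both versions of this file, the inner loops of Jacobi and the largely commented-out rotate helper apply the same 2x2 Givens rotation to a pair of entries; the inlined expressions (c*mat2_00) - (s*mat2_10) and (s*mat2_00) + (c*mat2_10) are that product written out. In the notation of the code, with p = S[k][l] and y = (e[l] - e[k]) / 2:

d = |y| + \sqrt{p^2 + y^2}, \qquad r = \sqrt{p^2 + d^2}, \qquad c = \frac{d}{r}, \qquad s = \frac{p}{r}

\begin{pmatrix} S'_{kl} \\ S'_{ij} \end{pmatrix} = \begin{pmatrix} c & -s \\ s & c \end{pmatrix} \begin{pmatrix} S_{kl} \\ S_{ij} \end{pmatrix}

and the same rotation is applied to the eigenvector pair E[i][k], E[i][l] in the final per-iteration loop.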
6638e2eb8a9e6e711bf11ee5334f7831c5a4e316.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <hiprand/hiprand.h> #include <sys/time.h> #include <errno.h> #include <unistd.h> #include <rocblas.h> #ifndef TILE_SIZE #define TILE_SIZE 16 #endif #define THRESHOLD 1e-3 /* CUDA layout */ dim3 grid(1); dim3 block(TILE_SIZE, TILE_SIZE); /* from cuda samples */ void checkGpuError(hipError_t result, char const *const func, const char *const file, int const line) { if(result!=hipSuccess) { \ fprintf(stderr, "Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(result)); exit(1); } } // This will output the proper CUDA error strings in the event // that a CUDA host call returns an error #define checkCudaErrors(val) checkGpuError((val), #val, __FILE__, __LINE__) /* https://gist.github.com/Tener/803377 */ #define CURAND_CALL(x) { \ do { \ if((x) != HIPRAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ exit(1); \ } \ } while(0); \ } /* time diff in ms */ double elapsed(struct timeval t0, struct timeval t1) { return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L; } /* compare matrix with abs difference */ void compare_matrix(float *matrix_a, float *matrix_b, long size, double threshold) { for (long i = 0; i < size*size; i++) { if (fabs((double)matrix_a[i] - (double)matrix_b[i]) > threshold) { fprintf(stderr, "Compare matrix failed: %f vs %f\n", matrix_a[i], matrix_b[i]); exit(1); } } } /* init matrix with hiprand */ void init_matrix(float *matrix, long size, unsigned long long seed) { float *d_matrix = NULL; hiprandGenerator_t gen; checkCudaErrors(hipMalloc(&d_matrix, sizeof(float)*size*size)); CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, seed)); CURAND_CALL(hiprandGenerateUniform(gen, d_matrix, size*size)); checkCudaErrors(hipMemcpy(matrix, d_matrix, sizeof(float)*size*size, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_matrix)); CURAND_CALL(hiprandDestroyGenerator(gen)); } /* C = AB on CPU with re-ordered loop */ void cpu_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); for (long i = 0; i < size; i++) { for (long k = 0; k < size; k++) { for (long j = 0; j < size; j++) { C[i * size + j] += A[i * size + k] * B[k * size + j]; } } } gettimeofday(&t1, NULL); printf("CPU matmul:\t\t\t%f ms\n", elapsed(t0, t1)); } /* matmul kernel with global memory */ __global__ void naive_sgemm_kernel(float *C, float *A, float *B, long size) { const long i = blockIdx.x * blockDim.x + threadIdx.x; const long j = blockIdx.y * blockDim.y + threadIdx.y; float val = 0.0; if (i >= size || j >= size) return; for (long k = 0; k < size; k++) { val += A[i * size + k] * B[k * size + j]; } C[i * size + j] += val; } /* matmul with global memory */ void naive_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); hipLaunchKernelGGL(( naive_sgemm_kernel), dim3(grid), dim3(block), 0, 0, C, A, B, size); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); gettimeofday(&t1, NULL); printf("GPU matmul (global memory):\t%f ms\n", elapsed(t0, t1)); } /* matmul kernel with shared memory */ __global__ void shared_sgemm_kernel(float *C, float *A, float *B, long size) { const long col = blockIdx.x * blockDim.x + threadIdx.x; const long row = blockIdx.y * blockDim.y + threadIdx.y; float val = 0.0; /* TODO declare shared memory with size TILE_SIZE x TILE_SIZE 
*/ __shared__ float tile_A[TILE_SIZE][TILE_SIZE]; __shared__ float tile_B[TILE_SIZE][TILE_SIZE]; if (col < size && row < size) { const long local_col = blockIdx.x * TILE_SIZE + threadIdx.x; const long local_row = blockIdx.y * TILE_SIZE + threadIdx.y; for (long m = 0; m < size / TILE_SIZE; ++m) { tile_A[threadIdx.y][threadIdx.x] = A[local_row * size + (m * TILE_SIZE + threadIdx.x)]; tile_B[threadIdx.y][threadIdx.x] = B[(m * TILE_SIZE + threadIdx.y) * size + local_col]; __syncthreads(); /* TODO introduce a pragma directive that can potentially improve performance here */ #pragma unroll for (long k = 0; k < TILE_SIZE; ++k) { /* TODO Perform multiplication here */ val += tile_A[threadIdx.y][k] * tile_B[k][threadIdx.x]; } __syncthreads(); } C[local_row * size + local_col] = val; } } /* matmul with shared memory */ void shared_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); hipLaunchKernelGGL(( shared_sgemm_kernel), dim3(grid), dim3(block), 0, 0, C, A, B, size); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); gettimeofday(&t1, NULL); printf("GPU matmul (shared memory):\t%f ms\n", elapsed(t0, t1)); } /* cuBLAS */ void cublas_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; float alpha = 1.0; float beta = 0.0; hipblasHandle_t handle; hipblasCreate(&handle); gettimeofday(&t0, NULL); /* TODO fill in the blanks, do C = BA instead of C = AB */ hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, size, size, size, &alpha, B, size, A, size, &beta, C, size); checkCudaErrors(hipDeviceSynchronize()); gettimeofday(&t1, NULL); hipblasDestroy(handle); printf("GPU cuBLAS matmul:\t\t%f ms\n", elapsed(t0, t1)); } void print_usage(char *program) { fprintf(stderr, "Usage: %s [-s size] [-v to verify with CPU sgemm]\n", program); } int main(int argc, char *argv[]) { int opt; long size = 64; bool verify = false; while ((opt = getopt(argc, argv, "s:v")) != -1) { switch (opt) { case 's': size = atol(optarg); if (size % TILE_SIZE != 0) { fprintf(stderr, "Error: Matrix size must be a multiple of tile size %d.\n", TILE_SIZE); exit(1); } break; case 'v': verify = true; printf("Matrix size: %ldx%ld\n", size, size); break; default: print_usage(argv[0]); exit(1); } } grid = dim3(((size + (TILE_SIZE - 1)) / TILE_SIZE), ((size + (TILE_SIZE - 1)) / TILE_SIZE)); printf("Matrix size: %ldx%ld\n", size, size); printf("Grid size: %ux%u\n", grid.x, grid.y); printf("Tile size: %ux%u\n", TILE_SIZE, TILE_SIZE); printf("Run CPU sgemm: %d\n\n", verify); float *A = (float*)malloc(sizeof(float)*size*size); float *B = (float*)malloc(sizeof(float)*size*size); float *C_result = (float*)malloc(sizeof(float)*size*size); float *C_truth = (float*)malloc(sizeof(float)*size*size); float *d_A = NULL; float *d_B = NULL; float *d_C = NULL; if (A == NULL || B == NULL || C_truth == NULL || C_result == NULL) { fprintf(stderr, "Error: %s\n", strerror(errno)); exit(1); } /* initialize A and B */ init_matrix(A, size, 1); init_matrix(B, size, 5); memset(C_truth, 0, sizeof(float)*size*size); /* allocate A and B on GPU */ checkCudaErrors(hipMalloc(&d_A, sizeof(float)*size*size)); checkCudaErrors(hipMalloc(&d_B, sizeof(float)*size*size)); checkCudaErrors(hipMalloc(&d_C, sizeof(float)*size*size)); /* copy A and B to GPU */ checkCudaErrors(hipMemcpy(d_A, A, sizeof(float)*size*size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_B, B, sizeof(float)*size*size, hipMemcpyHostToDevice)); /* host gemm */ if (verify) { cpu_sgemm(C_truth, A, B, size); } /* set C on GPU 
and run cublas */ checkCudaErrors(hipMemset(d_C, 0, sizeof(float)*size*size)); cublas_sgemm(d_C, d_A, d_B, size); checkCudaErrors(hipMemcpy(C_result, d_C, sizeof(float)*size*size, hipMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* run naive gpu gemm */ checkCudaErrors(hipMemset(d_C, 0, sizeof(float)*size*size)); naive_sgemm(d_C, d_A, d_B, size); checkCudaErrors(hipMemcpy(C_result, d_C, sizeof(float)*size*size, hipMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* run shared */ checkCudaErrors(hipMemset(d_C, 0, sizeof(float)*size*size)); shared_sgemm(d_C, d_A, d_B, size); checkCudaErrors(hipMemcpy(C_result, d_C, sizeof(float)*size*size, hipMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* free */ checkCudaErrors(hipFree(d_A)); checkCudaErrors(hipFree(d_B)); checkCudaErrors(hipFree(d_C)); free(A); free(B); free(C_truth); free(C_result); return 0; }
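The shared_sgemm_kernel in the file above assumes that size is an exact multiple of TILE_SIZE; main() enforces this by rejecting other sizes. For reference, a hedged sketch (not part of the original file) of the same tiling idea with boundary guards, so partial tiles at the right/bottom edges load zeros instead of reading out of bounds. Written as a plain __global__ kernel it compiles the same way under CUDA and HIP, and it assumes the TILE_SIZE macro defined above.

#ifndef TILE_SIZE
#define TILE_SIZE 16
#endif

/* Tiled matmul that tolerates sizes that are not a multiple of TILE_SIZE.
 * Launch with block(TILE_SIZE, TILE_SIZE) and a ceil-divided grid, as the
 * original main() already does. */
__global__ void guarded_sgemm_kernel(float *C, const float *A, const float *B, long size)
{
    __shared__ float tile_A[TILE_SIZE][TILE_SIZE];
    __shared__ float tile_B[TILE_SIZE][TILE_SIZE];

    const long col = blockIdx.x * TILE_SIZE + threadIdx.x;
    const long row = blockIdx.y * TILE_SIZE + threadIdx.y;
    const long num_tiles = (size + TILE_SIZE - 1) / TILE_SIZE;
    float val = 0.0f;

    for (long m = 0; m < num_tiles; ++m) {
        const long a_col = m * TILE_SIZE + threadIdx.x;
        const long b_row = m * TILE_SIZE + threadIdx.y;

        /* Every thread takes part in the loads and barriers, even when its
         * (row, col) lies outside the matrix, so __syncthreads() stays safe;
         * out-of-range elements are loaded as zero and contribute nothing. */
        tile_A[threadIdx.y][threadIdx.x] =
            (row < size && a_col < size) ? A[row * size + a_col] : 0.0f;
        tile_B[threadIdx.y][threadIdx.x] =
            (b_row < size && col < size) ? B[b_row * size + col] : 0.0f;
        __syncthreads();

        for (int k = 0; k < TILE_SIZE; ++k) {
            val += tile_A[threadIdx.y][k] * tile_B[k][threadIdx.x];
        }
        __syncthreads();
    }

    if (row < size && col < size) {
        C[row * size + col] = val;
    }
}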
6638e2eb8a9e6e711bf11ee5334f7831c5a4e316.cu
#include <cuda.h> #include <stdio.h> #include <curand.h> #include <sys/time.h> #include <errno.h> #include <unistd.h> #include <cublas_v2.h> #ifndef TILE_SIZE #define TILE_SIZE 16 #endif #define THRESHOLD 1e-3 /* CUDA layout */ dim3 grid(1); dim3 block(TILE_SIZE, TILE_SIZE); /* from cuda samples */ void checkGpuError(cudaError_t result, char const *const func, const char *const file, int const line) { if(result!=cudaSuccess) { \ fprintf(stderr, "Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(result)); exit(1); } } // This will output the proper CUDA error strings in the event // that a CUDA host call returns an error #define checkCudaErrors(val) checkGpuError((val), #val, __FILE__, __LINE__) /* https://gist.github.com/Tener/803377 */ #define CURAND_CALL(x) { \ do { \ if((x) != CURAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ exit(1); \ } \ } while(0); \ } /* time diff in ms */ double elapsed(struct timeval t0, struct timeval t1) { return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L; } /* compare matrix with abs difference */ void compare_matrix(float *matrix_a, float *matrix_b, long size, double threshold) { for (long i = 0; i < size*size; i++) { if (fabs((double)matrix_a[i] - (double)matrix_b[i]) > threshold) { fprintf(stderr, "Compare matrix failed: %f vs %f\n", matrix_a[i], matrix_b[i]); exit(1); } } } /* init matrix with curand */ void init_matrix(float *matrix, long size, unsigned long long seed) { float *d_matrix = NULL; curandGenerator_t gen; checkCudaErrors(cudaMalloc(&d_matrix, sizeof(float)*size*size)); CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, seed)); CURAND_CALL(curandGenerateUniform(gen, d_matrix, size*size)); checkCudaErrors(cudaMemcpy(matrix, d_matrix, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_matrix)); CURAND_CALL(curandDestroyGenerator(gen)); } /* C = AB on CPU with re-ordered loop */ void cpu_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); for (long i = 0; i < size; i++) { for (long k = 0; k < size; k++) { for (long j = 0; j < size; j++) { C[i * size + j] += A[i * size + k] * B[k * size + j]; } } } gettimeofday(&t1, NULL); printf("CPU matmul:\t\t\t%f ms\n", elapsed(t0, t1)); } /* matmul kernel with global memory */ __global__ void naive_sgemm_kernel(float *C, float *A, float *B, long size) { const long i = blockIdx.x * blockDim.x + threadIdx.x; const long j = blockIdx.y * blockDim.y + threadIdx.y; float val = 0.0; if (i >= size || j >= size) return; for (long k = 0; k < size; k++) { val += A[i * size + k] * B[k * size + j]; } C[i * size + j] += val; } /* matmul with global memory */ void naive_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); naive_sgemm_kernel<<<grid, block>>>(C, A, B, size); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); gettimeofday(&t1, NULL); printf("GPU matmul (global memory):\t%f ms\n", elapsed(t0, t1)); } /* matmul kernel with shared memory */ __global__ void shared_sgemm_kernel(float *C, float *A, float *B, long size) { const long col = blockIdx.x * blockDim.x + threadIdx.x; const long row = blockIdx.y * blockDim.y + threadIdx.y; float val = 0.0; /* TODO declare shared memory with size TILE_SIZE x TILE_SIZE */ __shared__ float tile_A[TILE_SIZE][TILE_SIZE]; __shared__ float tile_B[TILE_SIZE][TILE_SIZE]; if (col < size && 
row < size) { const long local_col = blockIdx.x * TILE_SIZE + threadIdx.x; const long local_row = blockIdx.y * TILE_SIZE + threadIdx.y; for (long m = 0; m < size / TILE_SIZE; ++m) { tile_A[threadIdx.y][threadIdx.x] = A[local_row * size + (m * TILE_SIZE + threadIdx.x)]; tile_B[threadIdx.y][threadIdx.x] = B[(m * TILE_SIZE + threadIdx.y) * size + local_col]; __syncthreads(); /* TODO introduce a pragma directive that can potentially improve performance here */ #pragma unroll for (long k = 0; k < TILE_SIZE; ++k) { /* TODO Perform multiplication here */ val += tile_A[threadIdx.y][k] * tile_B[k][threadIdx.x]; } __syncthreads(); } C[local_row * size + local_col] = val; } } /* matmul with shared memory */ void shared_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; gettimeofday(&t0, NULL); shared_sgemm_kernel<<<grid, block>>>(C, A, B, size); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); gettimeofday(&t1, NULL); printf("GPU matmul (shared memory):\t%f ms\n", elapsed(t0, t1)); } /* cuBLAS */ void cublas_sgemm(float *C, float *A, float *B, long size) { struct timeval t0, t1; float alpha = 1.0; float beta = 0.0; cublasHandle_t handle; cublasCreate(&handle); gettimeofday(&t0, NULL); /* TODO fill in the blanks, do C = BA instead of C = AB */ cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, &alpha, B, size, A, size, &beta, C, size); checkCudaErrors(cudaDeviceSynchronize()); gettimeofday(&t1, NULL); cublasDestroy(handle); printf("GPU cuBLAS matmul:\t\t%f ms\n", elapsed(t0, t1)); } void print_usage(char *program) { fprintf(stderr, "Usage: %s [-s size] [-v to verify with CPU sgemm]\n", program); } int main(int argc, char *argv[]) { int opt; long size = 64; bool verify = false; while ((opt = getopt(argc, argv, "s:v")) != -1) { switch (opt) { case 's': size = atol(optarg); if (size % TILE_SIZE != 0) { fprintf(stderr, "Error: Matrix size must be a multiple of tile size %d.\n", TILE_SIZE); exit(1); } break; case 'v': verify = true; printf("Matrix size: %ldx%ld\n", size, size); break; default: print_usage(argv[0]); exit(1); } } grid = dim3(((size + (TILE_SIZE - 1)) / TILE_SIZE), ((size + (TILE_SIZE - 1)) / TILE_SIZE)); printf("Matrix size: %ldx%ld\n", size, size); printf("Grid size: %ux%u\n", grid.x, grid.y); printf("Tile size: %ux%u\n", TILE_SIZE, TILE_SIZE); printf("Run CPU sgemm: %d\n\n", verify); float *A = (float*)malloc(sizeof(float)*size*size); float *B = (float*)malloc(sizeof(float)*size*size); float *C_result = (float*)malloc(sizeof(float)*size*size); float *C_truth = (float*)malloc(sizeof(float)*size*size); float *d_A = NULL; float *d_B = NULL; float *d_C = NULL; if (A == NULL || B == NULL || C_truth == NULL || C_result == NULL) { fprintf(stderr, "Error: %s\n", strerror(errno)); exit(1); } /* initialize A and B */ init_matrix(A, size, 1); init_matrix(B, size, 5); memset(C_truth, 0, sizeof(float)*size*size); /* allocate A and B on GPU */ checkCudaErrors(cudaMalloc(&d_A, sizeof(float)*size*size)); checkCudaErrors(cudaMalloc(&d_B, sizeof(float)*size*size)); checkCudaErrors(cudaMalloc(&d_C, sizeof(float)*size*size)); /* copy A and B to GPU */ checkCudaErrors(cudaMemcpy(d_A, A, sizeof(float)*size*size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_B, B, sizeof(float)*size*size, cudaMemcpyHostToDevice)); /* host gemm */ if (verify) { cpu_sgemm(C_truth, A, B, size); } /* set C on GPU and run cublas */ checkCudaErrors(cudaMemset(d_C, 0, sizeof(float)*size*size)); cublas_sgemm(d_C, d_A, d_B, size); 
checkCudaErrors(cudaMemcpy(C_result, d_C, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* run naive gpu gemm */ checkCudaErrors(cudaMemset(d_C, 0, sizeof(float)*size*size)); naive_sgemm(d_C, d_A, d_B, size); checkCudaErrors(cudaMemcpy(C_result, d_C, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* run shared */ checkCudaErrors(cudaMemset(d_C, 0, sizeof(float)*size*size)); shared_sgemm(d_C, d_A, d_B, size); checkCudaErrors(cudaMemcpy(C_result, d_C, sizeof(float)*size*size, cudaMemcpyDeviceToHost)); compare_matrix(C_result, C_truth, size, THRESHOLD); /* free */ checkCudaErrors(cudaFree(d_A)); checkCudaErrors(cudaFree(d_B)); checkCudaErrors(cudaFree(d_C)); free(A); free(B); free(C_truth); free(C_result); return 0; }
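The cublas_sgemm wrapper above relies on the usual row-major trick: cuBLAS computes in column-major order, where a row-major buffer is seen as its own transpose, so asking for B*A in column-major terms yields C = A*B in row-major terms (hence the TODO comment "do C = BA instead of C = AB"). A minimal hedged sketch of that call in isolation; for brevity the cublasStatus_t return values are not checked here, and the wrapper name is illustrative.

#include <cublas_v2.h>

/* C = A * B for square row-major matrices already resident on the device.
 * cuBLAS sees each row-major buffer as its transpose, so we request
 * C^T = B^T * A^T by passing B as the first operand and A as the second. */
void row_major_sgemm(cublasHandle_t handle, float *d_C,
                     const float *d_A, const float *d_B, int n)
{
    const float alpha = 1.0f;
    const float beta  = 0.0f;
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                n, n, n,
                &alpha,
                d_B, n,   /* "B^T" in cuBLAS' column-major view */
                d_A, n,   /* "A^T" in cuBLAS' column-major view */
                &beta,
                d_C, n);  /* column-major C^T, i.e. row-major C */
}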
1719fed9abda7c86c3595e0c7ae9b8143f138462.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "lodepng.h" #include "gputimer.h" #define BLOCK_WIDTH 16 __device__ void synthesize_inner(int i, int r, int c, double *u, double *u1, double *u2) { double P = 0.5; int N = 4; double E = 0.0002; if (r > 0 && r < N - 1 && c > 0 && c < N - 1) { u[i] = (P * (u1[i+N] + u1[i-N] + u1[i-1] + u1[i+1] - N * u1[i]) + 2 * u1[i] - (1 - E) * u2[i]) / (1 + E); } } __device__ void synthesize_edge(int i, int r, int c, double *u, double *u1, double *u2) { double G = 0.75; int N = 4; if (r == 0) { u[i] = G * u[i+N]; } else if (r == N - 1) { u[i] = G * u[i-N]; } else if (c == 0) { u[i] = G * u[i+1]; } else if (c == N - 1) { u[i] = G * u[i-1]; } } __device__ void synthesize_corner(int i, int r, int c, double *u, double *u1, double *u2) { double G = 0.75; int N = 4; if (r == 0 and c == 0) { u[i] = G * u[i+N]; } else if (r == N - 1 and c == 0) { u[i] = G * u[i-N]; } else if (r == 0 and c == N - 1) { u[i] = G * u[i-1]; } else if (r == N - 1 and c == N - 1) { u[i] = G * u[i-1]; } } __global__ void process(double *u, double *u1, double *u2, double *gr, int iterations) { int tid = threadIdx.x; int r = tid/4; int c = tid%4; int i; for (i = 0; i < iterations; i++) { synthesize_inner(tid, r, c, u, u1, u2); __syncthreads(); synthesize_edge(tid, r, c, u, u1, u2); __syncthreads(); synthesize_corner(tid, r, c, u, u1, u2); __syncthreads(); memcpy(u2, u1, 16 * sizeof(double)); memcpy(u1, u, 16 * sizeof(double)); gr[i] = u[10]; } } int main(int argc, char *argv[]) { GpuTimer timer; if (argc < 2) { printf("Incorrect arguments! Input format: ./grid_4_4 <number of iteration> \n"); return 1; } int i; int iterations = atoi(argv[1]); double u[16], u1[16], u2[16], r[iterations]; double *gu, *gu1, *gu2, *gr; for (i = 0; i < 16; i++) { u[i] = 0; u1[i] = 0; u2[i] = 0; } u1[10] = 1; //printf("NUM_THREADS: %d, with width %d and height %d\n", NUM_THREADS, width, height); hipMalloc(&gu, 16 * sizeof(double)); hipMalloc(&gu1, 16 * sizeof(double)); hipMalloc(&gu2, 16 * sizeof(double)); hipMalloc(&gr, iterations * sizeof(double)); hipMemcpy(gu, u, 16 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gu1, u1, 16 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gu2, u2, 16 * sizeof(double), hipMemcpyHostToDevice); hipMemset((void *)gr, 0, iterations * sizeof(double)); // launch the kernel timer.Start(); hipLaunchKernelGGL(( process), dim3(1), dim3(BLOCK_WIDTH), 0, 0, gu, gu1, gu2, gr, iterations); timer.Stop(); // copy back the result array to the CPU hipMemcpy(u, gu, 16 * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(u1, gu1, 16 * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(u2, gu2, 16 * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(r, gr, iterations * sizeof(double), hipMemcpyDeviceToHost); hipFree(gu); hipFree(gu1); hipFree(gu2); hipFree(gr); /* int j; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { printf(" %f", u[4*i + j]); } printf("\n"); } */ printf("%f", r[0]); for (i = 1; i < iterations; i++) { printf(", %f", r[i]); } printf("\nTime elapsed = %g ms with %d iterations\n", timer.Elapsed(), iterations); // free(u); // free(u1); // free(u2); return 0; }
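In the process kernel above, all 16 threads execute both block-wide memcpy calls with no barrier between them, so a thread that has already begun overwriting u1 with u can race against another thread still copying the old u1 into u2, and gr[i] is also written redundantly by every thread. A hedged sketch (the kernel name is illustrative, not from the original) of one way to make the buffer rotation race-free, with each thread moving only its own cell; it reuses the synthesize_* device functions defined in the file above.

/* Same drum-synthesis loop as process(), but the u2 <- u1 <- u rotation is
 * done one cell per thread, separated from the stencil updates by barriers. */
__global__ void process_rotated(double *u, double *u1, double *u2,
                                double *gr, int iterations)
{
    const int tid = threadIdx.x;          /* 0..15, one thread per grid cell */
    const int r = tid / 4;
    const int c = tid % 4;

    for (int i = 0; i < iterations; i++) {
        synthesize_inner(tid, r, c, u, u1, u2);
        __syncthreads();
        synthesize_edge(tid, r, c, u, u1, u2);
        __syncthreads();
        synthesize_corner(tid, r, c, u, u1, u2);
        __syncthreads();                  /* u is now fully updated */

        u2[tid] = u1[tid];                /* each thread rotates its own cell */
        u1[tid] = u[tid];
        if (tid == 0) {
            gr[i] = u[10];                /* record the probe point once */
        }
        __syncthreads();                  /* publish u1/u2 before next step */
    }
}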
1719fed9abda7c86c3595e0c7ae9b8143f138462.cu
#include <stdio.h> #include <stdlib.h> #include "lodepng.h" #include "gputimer.h" #define BLOCK_WIDTH 16 __device__ void synthesize_inner(int i, int r, int c, double *u, double *u1, double *u2) { double P = 0.5; int N = 4; double E = 0.0002; if (r > 0 && r < N - 1 && c > 0 && c < N - 1) { u[i] = (P * (u1[i+N] + u1[i-N] + u1[i-1] + u1[i+1] - N * u1[i]) + 2 * u1[i] - (1 - E) * u2[i]) / (1 + E); } } __device__ void synthesize_edge(int i, int r, int c, double *u, double *u1, double *u2) { double G = 0.75; int N = 4; if (r == 0) { u[i] = G * u[i+N]; } else if (r == N - 1) { u[i] = G * u[i-N]; } else if (c == 0) { u[i] = G * u[i+1]; } else if (c == N - 1) { u[i] = G * u[i-1]; } } __device__ void synthesize_corner(int i, int r, int c, double *u, double *u1, double *u2) { double G = 0.75; int N = 4; if (r == 0 and c == 0) { u[i] = G * u[i+N]; } else if (r == N - 1 and c == 0) { u[i] = G * u[i-N]; } else if (r == 0 and c == N - 1) { u[i] = G * u[i-1]; } else if (r == N - 1 and c == N - 1) { u[i] = G * u[i-1]; } } __global__ void process(double *u, double *u1, double *u2, double *gr, int iterations) { int tid = threadIdx.x; int r = tid/4; int c = tid%4; int i; for (i = 0; i < iterations; i++) { synthesize_inner(tid, r, c, u, u1, u2); __syncthreads(); synthesize_edge(tid, r, c, u, u1, u2); __syncthreads(); synthesize_corner(tid, r, c, u, u1, u2); __syncthreads(); memcpy(u2, u1, 16 * sizeof(double)); memcpy(u1, u, 16 * sizeof(double)); gr[i] = u[10]; } } int main(int argc, char *argv[]) { GpuTimer timer; if (argc < 2) { printf("Incorrect arguments! Input format: ./grid_4_4 <number of iteration> \n"); return 1; } int i; int iterations = atoi(argv[1]); double u[16], u1[16], u2[16], r[iterations]; double *gu, *gu1, *gu2, *gr; for (i = 0; i < 16; i++) { u[i] = 0; u1[i] = 0; u2[i] = 0; } u1[10] = 1; //printf("NUM_THREADS: %d, with width %d and height %d\n", NUM_THREADS, width, height); cudaMalloc(&gu, 16 * sizeof(double)); cudaMalloc(&gu1, 16 * sizeof(double)); cudaMalloc(&gu2, 16 * sizeof(double)); cudaMalloc(&gr, iterations * sizeof(double)); cudaMemcpy(gu, u, 16 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gu1, u1, 16 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gu2, u2, 16 * sizeof(double), cudaMemcpyHostToDevice); cudaMemset((void *)gr, 0, iterations * sizeof(double)); // launch the kernel timer.Start(); process<<<1, BLOCK_WIDTH>>>(gu, gu1, gu2, gr, iterations); timer.Stop(); // copy back the result array to the CPU cudaMemcpy(u, gu, 16 * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(u1, gu1, 16 * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(u2, gu2, 16 * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(r, gr, iterations * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(gu); cudaFree(gu1); cudaFree(gu2); cudaFree(gr); /* int j; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { printf(" %f", u[4*i + j]); } printf("\n"); } */ printf("%f", r[0]); for (i = 1; i < iterations; i++) { printf(", %f", r[i]); } printf("\nTime elapsed = %g ms with %d iterations\n", timer.Elapsed(), iterations); // free(u); // free(u1); // free(u2); return 0; }
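Unlike the sgemm program earlier in this collection, this grid_4_4 program never checks the return value of its cudaMalloc/cudaMemcpy calls or of the kernel launch, so a failed allocation or an invalid launch configuration would only show up as wrong output. A minimal sketch of the usual checking pattern, in the CUDA spelling used by this .cu file (hipify maps it to the hip* equivalents); the CHECK_CUDA name is illustrative, not from the original.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

/* Abort with a readable message if a CUDA runtime call fails. */
#define CHECK_CUDA(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "%s:%d: %s failed: %s\n", __FILE__, __LINE__,   \
                    #call, cudaGetErrorString(err_));                       \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

/* Usage sketch around the allocation and launch in main():
 *   CHECK_CUDA(cudaMalloc(&gu, 16 * sizeof(double)));
 *   process<<<1, BLOCK_WIDTH>>>(gu, gu1, gu2, gr, iterations);
 *   CHECK_CUDA(cudaGetLastError());        // launch-configuration errors
 *   CHECK_CUDA(cudaDeviceSynchronize());   // errors raised while running
 */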
629ec74576a55e313b9f860419820839ee117e19.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper //#include <cutil.h> #include <fstream> #include <iostream> #ifdef __NVCC__ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #else #include <__clang_cuda_runtime_wrapper.h> #endif #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // includes, system #include <vector> // includes, project // #include "exception.h" // Definition of the StopWatch Interface, this is used if we don't want to use // the CUT functions But rather in a self contained class interface class StopWatchInterface { public: StopWatchInterface(){}; virtual ~StopWatchInterface(){}; public: //! Start time measurement virtual void start() = 0; //! Stop time measurement virtual void stop() = 0; //! Reset time counters to zero virtual void reset() = 0; //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned virtual float getTime() = 0; //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time virtual float getAverageTime() = 0; }; ////////////////////////////////////////////////////////////////// // Begin Stopwatch timer class definitions for all OS platforms // ////////////////////////////////////////////////////////////////// #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) // includes, system #define WINDOWS_LEAN_AND_MEAN #include <windows.h> #undef min #undef max //! Windows specific implementation of StopWatch class StopWatchWin : public StopWatchInterface { public: //! Constructor, default StopWatchWin() : start_time(), end_time(), diff_time(0.0f), total_time(0.0f), running(false), clock_sessions(0), freq(0), freq_set(false) { if (!freq_set) { // helper variable LARGE_INTEGER temp; // get the tick frequency from the OS QueryPerformanceFrequency((LARGE_INTEGER *)&temp); // convert to type in which it is needed freq = ((double)temp.QuadPart) / 1000.0; // rememeber query freq_set = true; } }; // Destructor ~StopWatchWin(){}; public: //! Start time measurement inline void start(); //! Stop time measurement inline void stop(); //! Reset time counters to zero inline void reset(); //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned inline float getTime(); //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time inline float getAverageTime(); private: // member variables //! Start of measurement LARGE_INTEGER start_time; //! End of measurement LARGE_INTEGER end_time; //! Time difference between the last start and stop float diff_time; //! TOTAL time difference between starts and stops float total_time; //! flag if the stop watch is running bool running; //! Number of times clock has been started //! and stopped to allow averaging int clock_sessions; //! tick frequency double freq; //! flag if the frequency has been set bool freq_set; }; // functions, inlined //////////////////////////////////////////////////////////////////////////////// //! 
Start time measurement //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::start() { QueryPerformanceCounter((LARGE_INTEGER *)&start_time); running = true; } //////////////////////////////////////////////////////////////////////////////// //! Stop time measurement and increment add to the current diff_time summation //! variable. Also increment the number of times this clock has been run. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::stop() { QueryPerformanceCounter((LARGE_INTEGER *)&end_time); diff_time = (float)(((double)end_time.QuadPart - (double)start_time.QuadPart) / freq); total_time += diff_time; clock_sessions++; running = false; } //////////////////////////////////////////////////////////////////////////////// //! Reset the timer to 0. Does not change the timer running state but does //! recapture this point in time as the current start time if it is running. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::reset() { diff_time = 0; total_time = 0; clock_sessions = 0; if (running) { QueryPerformanceCounter((LARGE_INTEGER *)&start_time); } } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned added to the //! current diff_time sum, otherwise the current summed time difference alone //! is returned. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchWin::getTime() { // Return the TOTAL time to date float retval = total_time; if (running) { LARGE_INTEGER temp; QueryPerformanceCounter((LARGE_INTEGER *)&temp); retval += (float)(((double)(temp.QuadPart - start_time.QuadPart)) / freq); } return retval; } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. for a single run based on the total number of COMPLETED runs //! and the total time. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchWin::getAverageTime() { return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f; } #else // Declarations for Stopwatch on Linux and Mac OSX // includes, system #include <ctime> #include <sys/time.h> //! Windows specific implementation of StopWatch class StopWatchLinux : public StopWatchInterface { public: //! Constructor, default StopWatchLinux() : start_time(), diff_time(0.0), total_time(0.0), running(false), clock_sessions(0){}; // Destructor virtual ~StopWatchLinux(){}; public: //! Start time measurement inline void start(); //! Stop time measurement inline void stop(); //! Reset time counters to zero inline void reset(); //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned inline float getTime(); //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time inline float getAverageTime(); private: // helper functions //! Get difference between start time and current time inline float getDiffTime(); private: // member variables //! Start of measurement struct timeval start_time; //! Time difference between the last start and stop float diff_time; //! 
TOTAL time difference between starts and stops float total_time; //! flag if the stop watch is running bool running; //! Number of times clock has been started //! and stopped to allow averaging int clock_sessions; }; // functions, inlined //////////////////////////////////////////////////////////////////////////////// //! Start time measurement //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::start() { gettimeofday(&start_time, 0); running = true; } //////////////////////////////////////////////////////////////////////////////// //! Stop time measurement and increment add to the current diff_time summation //! variable. Also increment the number of times this clock has been run. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::stop() { diff_time = getDiffTime(); total_time += diff_time; running = false; clock_sessions++; } //////////////////////////////////////////////////////////////////////////////// //! Reset the timer to 0. Does not change the timer running state but does //! recapture this point in time as the current start time if it is running. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::reset() { diff_time = 0; total_time = 0; clock_sessions = 0; if (running) { gettimeofday(&start_time, 0); } } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned added to the //! current diff_time sum, otherwise the current summed time difference alone //! is returned. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getTime() { // Return the TOTAL time to date float retval = total_time; if (running) { retval += getDiffTime(); } return retval; } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. for a single run based on the total number of COMPLETED runs //! and the total time. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getAverageTime() { return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getDiffTime() { struct timeval t_time; gettimeofday(&t_time, 0); // time difference in milli-seconds return (float)(1000.0 * (t_time.tv_sec - start_time.tv_sec) + (0.001 * (t_time.tv_usec - start_time.tv_usec))); } #endif // WIN32 //////////////////////////////////////////////////////////////////////////////// //! Timer functionality exported //////////////////////////////////////////////////////////////////////////////// //! Create a new timer //! @return true if a time has been created, otherwise false //! 
@param name of the new timer, 0 if the creation failed //////////////////////////////////////////////////////////////////////////////// inline bool sdkCreateTimer(StopWatchInterface **timer_interface) { // printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) *timer_interface = (StopWatchInterface *)new StopWatchWin(); #else *timer_interface = (StopWatchInterface *)new StopWatchLinux(); #endif return (*timer_interface != NULL) ? true : false; } //////////////////////////////////////////////////////////////////////////////// //! Delete a timer //! @return true if a time has been deleted, otherwise false //! @param name of the timer to delete //////////////////////////////////////////////////////////////////////////////// inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) { // printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { delete *timer_interface; *timer_interface = NULL; } return true; } //////////////////////////////////////////////////////////////////////////////// //! Start the time with name \a name //! @param name name of the timer to start //////////////////////////////////////////////////////////////////////////////// inline bool sdkStartTimer(StopWatchInterface **timer_interface) { // printf("sdkStartTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->start(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Stop the time with name \a name. Does not reset. //! @param name name of the timer to stop //////////////////////////////////////////////////////////////////////////////// inline bool sdkStopTimer(StopWatchInterface **timer_interface) { // printf("sdkStopTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->stop(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Resets the timer's counter. //! @param name name of the timer to reset. //////////////////////////////////////////////////////////////////////////////// inline bool sdkResetTimer(StopWatchInterface **timer_interface) { // printf("sdkResetTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->reset(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Return the average time for timer execution as the total time //! for the timer dividied by the number of completed (stopped) runs the timer //! has made. //! Excludes the current running time if the timer is currently running. //! @param name name of the timer to return the time of //////////////////////////////////////////////////////////////////////////////// inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) { // printf("sdkGetAverageTimerValue called object %08x\n", (void // *)*timer_interface); if (*timer_interface) { return (*timer_interface)->getAverageTime(); } else { return 0.0f; } } //////////////////////////////////////////////////////////////////////////////// //! Total execution time for the timer over all runs since the last reset //! or timer creation. //! @param name name of the timer to obtain the value of. 
//////////////////////////////////////////////////////////////////////////////// inline float sdkGetTimerValue(StopWatchInterface **timer_interface) { // printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface); if (*timer_interface) { return (*timer_interface)->getTime(); } else { return 0.0f; } } /* * Options * */ #define GAMMA 1.4f #define iterations 2000 // #ifndef block_length // #define block_length 192 // #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f /* * not options */ #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE_0 RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE_0 RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_0 RD_WG_SIZE #else #define BLOCK_SIZE_0 192 #endif #ifdef RD_WG_SIZE_1_0 #define BLOCK_SIZE_1 RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_1 RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_1 RD_WG_SIZE #else #define BLOCK_SIZE_1 192 #endif #ifdef RD_WG_SIZE_2_0 #define BLOCK_SIZE_2 RD_WG_SIZE_2_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_2 RD_WG_SIZE_2 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_2 RD_WG_SIZE #else #define BLOCK_SIZE_2 192 #endif #ifdef RD_WG_SIZE_3_0 #define BLOCK_SIZE_3 RD_WG_SIZE_3_0 #elif defined(RD_WG_SIZE_3) #define BLOCK_SIZE_3 RD_WG_SIZE_3 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_3 RD_WG_SIZE #else #define BLOCK_SIZE_3 192 #endif #ifdef RD_WG_SIZE_4_0 #define BLOCK_SIZE_4 RD_WG_SIZE_4_0 #elif defined(RD_WG_SIZE_4) #define BLOCK_SIZE_4 RD_WG_SIZE_4 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_4 RD_WG_SIZE #else #define BLOCK_SIZE_4 192 #endif // #if block_length > 128 // #warning "the kernels may fail too launch on some systems if the block length // is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM + NDIM) #define NVAR (VAR_DENSITY_ENERGY + 1) #define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, hipGetErrorString(err)); exit(-1); } } #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString(err)); exit(-1); } } /* * Generic functions */ template <typename T> T *alloc(int N) { T *t; checkCudaErrors(hipMalloc((void **)&t, sizeof(T) * N)); return t; } template <typename T> void dealloc(T *array) { checkCudaErrors(hipFree((void *)array)); } template <typename T> void copy(T *dst, T *src, int N) { checkCudaErrors(hipMemcpy((void *)dst, (void *)src, N * sizeof(T), hipMemcpyDeviceToDevice)); } template <typename T> void upload(T *dst, T *src, int N) { checkCudaErrors(hipMemcpy((void *)dst, (void *)src, N * sizeof(T), hipMemcpyHostToDevice)); } template <typename T> void download(T *dst, T *src, int N) { checkCudaErrors(hipMemcpy((void *)dst, (void *)src, N * sizeof(T), hipMemcpyDeviceToHost)); } void dump(float *variables, int nel, int nelr) { float *h_variables = new float[nelr * NVAR]; download(h_variables, variables, nelr * NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for (int i = 0; i < nel; i++) file << h_variables[i + 
VAR_DENSITY * nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for (int i = 0; i < nel; i++) { for (int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM + j) * nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for (int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY * nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_flux_contribution_momentum_x[1]; __constant__ float3 ff_flux_contribution_momentum_y[1]; __constant__ float3 ff_flux_contribution_momentum_z[1]; __constant__ float3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float *variables) { const int i = (blockDim.x * blockIdx.x + threadIdx.x); for (int j = 0; j < NVAR; j++) variables[i + j * nelr] = ff_variable[j]; } void initialize_variables(int nelr, float *variables) { dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1); hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables); getLastCudaError("initialize_variables failed"); } __device__ __host__ inline void compute_flux_contribution( float &density, float3 &momentum, float &density_energy, float &pressure, float3 &velocity, float3 &fc_momentum_x, float3 &fc_momentum_y, float3 &fc_momentum_z, float3 &fc_density_energy) { fc_momentum_x.x = velocity.x * momentum.x + pressure; fc_momentum_x.y = velocity.x * momentum.y; fc_momentum_x.z = velocity.x * momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y * momentum.y + pressure; fc_momentum_y.z = velocity.y * momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z * momentum.z + pressure; float de_p = density_energy + pressure; fc_density_energy.x = velocity.x * de_p; fc_density_energy.y = velocity.y * de_p; fc_density_energy.z = velocity.z * de_p; } __device__ inline void compute_velocity(float &density, float3 &momentum, float3 &velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3 &velocity) { return velocity.x * velocity.x + velocity.y * velocity.y + velocity.z * velocity.z; } __device__ inline float compute_pressure(float &density, float &density_energy, float &speed_sqd) { return (float(GAMMA) - float(1.0f)) * (density_energy - float(0.5f) * density * speed_sqd); } __device__ inline float compute_speed_of_sound(float &density, float &pressure) { return sqrtf(float(GAMMA) * pressure / density); } __global__ void cuda_compute_step_factor(int nelr, float *variables, float *areas, float *step_factors) { const int i = (blockDim.x * blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY * nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM + 0) * nelr]; momentum.y = variables[i + (VAR_MOMENTUM + 1) * nelr]; momentum.z = variables[i + (VAR_MOMENTUM + 2) * nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY * nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... 
but when we do time // stepping, this later would need to be divided by the area, so we just do it // all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float *variables, float *areas, float *step_factors) { dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors); getLastCudaError("compute_step_factor failed"); } /* * * */ __global__ void cuda_compute_flux(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x * blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY * nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM + 0) * nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM + 1) * nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM + 2) * nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY * nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution( density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; for (j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j * nelr]; normal.x = normals[i + (j + 0 * NNB) * nelr]; normal.y = normals[i + (j + 1 * NNB) * nelr]; normal.z = normals[i + (j + 2 * NNB) * nelr]; normal_len = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z); if (nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY * nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM + 0) * nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM + 1) * nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM + 2) * nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY * nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution( density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len * smoothing_coefficient * float(0.5f) * (speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); 
flux_i_density += factor * (density_i - density_nb); flux_i_density_energy += factor * (density_energy_i - density_energy_nb); flux_i_momentum.x += factor * (momentum_i.x - momentum_nb.x); flux_i_momentum.y += factor * (momentum_i.y - momentum_nb.y); flux_i_momentum.z += factor * (momentum_i.z - momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f) * normal.x; flux_i_density += factor * (momentum_nb.x + momentum_i.x); flux_i_density_energy += factor * (flux_contribution_nb_density_energy.x + flux_contribution_i_density_energy.x); flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.x + flux_contribution_i_momentum_z.x); factor = float(0.5f) * normal.y; flux_i_density += factor * (momentum_nb.y + momentum_i.y); flux_i_density_energy += factor * (flux_contribution_nb_density_energy.y + flux_contribution_i_density_energy.y); flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.y + flux_contribution_i_momentum_z.y); factor = float(0.5f) * normal.z; flux_i_density += factor * (momentum_nb.z + momentum_i.z); flux_i_density_energy += factor * (flux_contribution_nb_density_energy.z + flux_contribution_i_density_energy.z); flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.z + flux_contribution_i_momentum_z.z); } else if (nb == -1) // a wing boundary { flux_i_momentum.x += normal.x * pressure_i; flux_i_momentum.y += normal.y * pressure_i; flux_i_momentum.z += normal.z * pressure_i; } else if (nb == -2) // a far field boundary { factor = float(0.5f) * normal.x; flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 0] + momentum_i.x); flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].x + flux_contribution_i_density_energy.x); flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f) * normal.y; flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 1] + momentum_i.y); flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].y + flux_contribution_i_density_energy.y); flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f) * normal.z; flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 2] + momentum_i.z); flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].z + flux_contribution_i_density_energy.z); flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor * 
(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY * nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM + 0) * nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM + 1) * nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM + 2) * nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY * nelr] = flux_i_density_energy; } void compute_flux(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes) { dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3); hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg), dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes); getLastCudaError("compute_flux failed"); } __global__ void cuda_time_step(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes) { const int i = (blockDim.x * blockIdx.x + threadIdx.x); float factor = step_factors[i] / float(RK + 1 - j); variables[i + VAR_DENSITY * nelr] = old_variables[i + VAR_DENSITY * nelr] + factor * fluxes[i + VAR_DENSITY * nelr]; variables[i + VAR_DENSITY_ENERGY * nelr] = old_variables[i + VAR_DENSITY_ENERGY * nelr] + factor * fluxes[i + VAR_DENSITY_ENERGY * nelr]; variables[i + (VAR_MOMENTUM + 0) * nelr] = old_variables[i + (VAR_MOMENTUM + 0) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 0) * nelr]; variables[i + (VAR_MOMENTUM + 1) * nelr] = old_variables[i + (VAR_MOMENTUM + 1) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 1) * nelr]; variables[i + (VAR_MOMENTUM + 2) * nelr] = old_variables[i + (VAR_MOMENTUM + 2) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 2) * nelr]; } void time_step(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes) { dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4); hipLaunchKernelGGL(( cuda_time_step), dim3(Dg), dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes); getLastCudaError("update failed"); } /* * Main function */ int main(int argc, char **argv) { printf("WG size of kernel:initialize = %d, WG size of " "kernel:compute_step_factor = %d, WG size of kernel:compute_flux = " "%d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4); if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char *data_file_name = argv[1]; hipDeviceProp_t prop; int dev; checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipGetDevice(&dev)); checkCudaErrors(hipGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA * ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach) * ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed * float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed * float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM + 0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM + 1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM + 2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY] * (float(0.5f) * (ff_speed * ff_speed)) + (ff_pressure / 
float(GAMMA - 1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable + VAR_MOMENTUM + 0); h_ff_momentum.y = *(h_ff_variable + VAR_MOMENTUM + 1); h_ff_momentum.z = *(h_ff_variable + VAR_MOMENTUM + 2); float3 h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu checkCudaErrors( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR * sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3))); checkCudaErrors(hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3))); checkCudaErrors(hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3))); checkCudaErrors(hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3))); } int nel; int nelr; // read in domain geometry float *areas; int *elements_surrounding_elements; float *normals; { std::ifstream file(data_file_name); file >> nel; nelr = BLOCK_SIZE_0 * ((nel / BLOCK_SIZE_0) + ::min(1, nel % BLOCK_SIZE_0)); float *h_areas = new float[nelr]; int *h_elements_surrounding_elements = new int[nelr * NNB]; float *h_normals = new float[nelr * NDIM * NNB]; // read in data for (int i = 0; i < nel; i++) { file >> h_areas[i]; for (int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j * nelr]; if (h_elements_surrounding_elements[i + j * nelr] < 0) h_elements_surrounding_elements[i + j * nelr] = -1; h_elements_surrounding_elements[i + j * nelr]--; // it's coming in with // Fortran numbering for (int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k * NNB) * nelr]; h_normals[i + (j + k * NNB) * nelr] = -h_normals[i + (j + k * NNB) * nelr]; } } } // fill in remaining data int last = nel - 1; for (int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for (int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j * nelr] = h_elements_surrounding_elements[last + j * nelr]; for (int k = 0; k < NDIM; k++) h_normals[last + (j + k * NNB) * nelr] = h_normals[last + (j + k * NNB) * nelr]; } } areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr * NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr * NNB); normals = alloc<float>(nelr * NDIM * NNB); upload<float>(normals, h_normals, nelr * NDIM * NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions float *variables = alloc<float>(nelr * NVAR); initialize_variables(nelr, variables); float *old_variables = alloc<float>(nelr * NVAR); float *fluxes = alloc<float>(nelr * NVAR); float *step_factors = alloc<float>(nelr); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); hipMemset((void *)step_factors, 0, sizeof(float) * nelr); // make sure CUDA isn't still doing something before we start timing hipDeviceSynchronize(); // these need to be computed the first time 
in order to compute time step std::cout << "Starting..." << std::endl; StopWatchInterface *timer = 0; // unsigned int timer = 0; // CUT_SAFE_CALL( cutCreateTimer( &timer)); // CUT_SAFE_CALL( cutStartTimer( timer)); sdkCreateTimer(&timer); sdkStartTimer(&timer); // Begin iterations for (int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr * NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); getLastCudaError("compute_step_factor failed"); for (int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); getLastCudaError("compute_flux failed"); time_step(j, nelr, old_variables, variables, step_factors, fluxes); getLastCudaError("time_step failed"); } } hipDeviceSynchronize(); // CUT_SAFE_CALL( cutStopTimer(timer) ); sdkStopTimer(&timer); std::cout << (sdkGetAverageTimerValue(&timer) / 1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); std::cout << "Done..." << std::endl; return 0; }
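The solver above keeps its NVAR conserved quantities in structure-of-arrays form: component v of element i lives at variables[i + v * nelr], and nelr is nel padded up to a multiple of BLOCK_SIZE_0 so every kernel can launch whole blocks without per-thread bounds checks. An illustrative sketch of that layout and padding as small helpers; the names soa_index and round_up are assumptions, not identifiers from the original source.

/* Component-major, element-minor layout used by the solver: threads with
 * consecutive element indices read consecutive addresses of each component,
 * which keeps the global-memory accesses coalesced. */
__host__ __device__ inline int soa_index(int element, int component, int nelr)
{
    return element + component * nelr;
}

/* nel rounded up to the next multiple of block_size; equivalent to the
 * expression BLOCK_SIZE_0 * ((nel / BLOCK_SIZE_0) + min(1, nel % BLOCK_SIZE_0))
 * used when the mesh is read in. */
inline int round_up(int nel, int block_size)
{
    return ((nel + block_size - 1) / block_size) * block_size;
}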
629ec74576a55e313b9f860419820839ee117e19.cu
// Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper //#include <cutil.h> #include <fstream> #include <iostream> #ifdef __NVCC__ #include <cuda.h> #include <cuda_runtime.h> #else #include <__clang_cuda_runtime_wrapper.h> #endif #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // includes, system #include <vector> // includes, project // #include "exception.h" // Definition of the StopWatch Interface, this is used if we don't want to use // the CUT functions But rather in a self contained class interface class StopWatchInterface { public: StopWatchInterface(){}; virtual ~StopWatchInterface(){}; public: //! Start time measurement virtual void start() = 0; //! Stop time measurement virtual void stop() = 0; //! Reset time counters to zero virtual void reset() = 0; //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned virtual float getTime() = 0; //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time virtual float getAverageTime() = 0; }; ////////////////////////////////////////////////////////////////// // Begin Stopwatch timer class definitions for all OS platforms // ////////////////////////////////////////////////////////////////// #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) // includes, system #define WINDOWS_LEAN_AND_MEAN #include <windows.h> #undef min #undef max //! Windows specific implementation of StopWatch class StopWatchWin : public StopWatchInterface { public: //! Constructor, default StopWatchWin() : start_time(), end_time(), diff_time(0.0f), total_time(0.0f), running(false), clock_sessions(0), freq(0), freq_set(false) { if (!freq_set) { // helper variable LARGE_INTEGER temp; // get the tick frequency from the OS QueryPerformanceFrequency((LARGE_INTEGER *)&temp); // convert to type in which it is needed freq = ((double)temp.QuadPart) / 1000.0; // rememeber query freq_set = true; } }; // Destructor ~StopWatchWin(){}; public: //! Start time measurement inline void start(); //! Stop time measurement inline void stop(); //! Reset time counters to zero inline void reset(); //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned inline float getTime(); //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time inline float getAverageTime(); private: // member variables //! Start of measurement LARGE_INTEGER start_time; //! End of measurement LARGE_INTEGER end_time; //! Time difference between the last start and stop float diff_time; //! TOTAL time difference between starts and stops float total_time; //! flag if the stop watch is running bool running; //! Number of times clock has been started //! and stopped to allow averaging int clock_sessions; //! tick frequency double freq; //! flag if the frequency has been set bool freq_set; }; // functions, inlined //////////////////////////////////////////////////////////////////////////////// //! 
Start time measurement //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::start() { QueryPerformanceCounter((LARGE_INTEGER *)&start_time); running = true; } //////////////////////////////////////////////////////////////////////////////// //! Stop time measurement and increment add to the current diff_time summation //! variable. Also increment the number of times this clock has been run. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::stop() { QueryPerformanceCounter((LARGE_INTEGER *)&end_time); diff_time = (float)(((double)end_time.QuadPart - (double)start_time.QuadPart) / freq); total_time += diff_time; clock_sessions++; running = false; } //////////////////////////////////////////////////////////////////////////////// //! Reset the timer to 0. Does not change the timer running state but does //! recapture this point in time as the current start time if it is running. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::reset() { diff_time = 0; total_time = 0; clock_sessions = 0; if (running) { QueryPerformanceCounter((LARGE_INTEGER *)&start_time); } } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned added to the //! current diff_time sum, otherwise the current summed time difference alone //! is returned. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchWin::getTime() { // Return the TOTAL time to date float retval = total_time; if (running) { LARGE_INTEGER temp; QueryPerformanceCounter((LARGE_INTEGER *)&temp); retval += (float)(((double)(temp.QuadPart - start_time.QuadPart)) / freq); } return retval; } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. for a single run based on the total number of COMPLETED runs //! and the total time. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchWin::getAverageTime() { return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f; } #else // Declarations for Stopwatch on Linux and Mac OSX // includes, system #include <ctime> #include <sys/time.h> //! Windows specific implementation of StopWatch class StopWatchLinux : public StopWatchInterface { public: //! Constructor, default StopWatchLinux() : start_time(), diff_time(0.0), total_time(0.0), running(false), clock_sessions(0){}; // Destructor virtual ~StopWatchLinux(){}; public: //! Start time measurement inline void start(); //! Stop time measurement inline void stop(); //! Reset time counters to zero inline void reset(); //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned inline float getTime(); //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time inline float getAverageTime(); private: // helper functions //! Get difference between start time and current time inline float getDiffTime(); private: // member variables //! Start of measurement struct timeval start_time; //! Time difference between the last start and stop float diff_time; //! 
TOTAL time difference between starts and stops float total_time; //! flag if the stop watch is running bool running; //! Number of times clock has been started //! and stopped to allow averaging int clock_sessions; }; // functions, inlined //////////////////////////////////////////////////////////////////////////////// //! Start time measurement //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::start() { gettimeofday(&start_time, 0); running = true; } //////////////////////////////////////////////////////////////////////////////// //! Stop time measurement and increment add to the current diff_time summation //! variable. Also increment the number of times this clock has been run. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::stop() { diff_time = getDiffTime(); total_time += diff_time; running = false; clock_sessions++; } //////////////////////////////////////////////////////////////////////////////// //! Reset the timer to 0. Does not change the timer running state but does //! recapture this point in time as the current start time if it is running. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::reset() { diff_time = 0; total_time = 0; clock_sessions = 0; if (running) { gettimeofday(&start_time, 0); } } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned added to the //! current diff_time sum, otherwise the current summed time difference alone //! is returned. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getTime() { // Return the TOTAL time to date float retval = total_time; if (running) { retval += getDiffTime(); } return retval; } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. for a single run based on the total number of COMPLETED runs //! and the total time. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getAverageTime() { return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getDiffTime() { struct timeval t_time; gettimeofday(&t_time, 0); // time difference in milli-seconds return (float)(1000.0 * (t_time.tv_sec - start_time.tv_sec) + (0.001 * (t_time.tv_usec - start_time.tv_usec))); } #endif // WIN32 //////////////////////////////////////////////////////////////////////////////// //! Timer functionality exported //////////////////////////////////////////////////////////////////////////////// //! Create a new timer //! @return true if a time has been created, otherwise false //! 
@param name of the new timer, 0 if the creation failed //////////////////////////////////////////////////////////////////////////////// inline bool sdkCreateTimer(StopWatchInterface **timer_interface) { // printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) *timer_interface = (StopWatchInterface *)new StopWatchWin(); #else *timer_interface = (StopWatchInterface *)new StopWatchLinux(); #endif return (*timer_interface != NULL) ? true : false; } //////////////////////////////////////////////////////////////////////////////// //! Delete a timer //! @return true if a time has been deleted, otherwise false //! @param name of the timer to delete //////////////////////////////////////////////////////////////////////////////// inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) { // printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { delete *timer_interface; *timer_interface = NULL; } return true; } //////////////////////////////////////////////////////////////////////////////// //! Start the time with name \a name //! @param name name of the timer to start //////////////////////////////////////////////////////////////////////////////// inline bool sdkStartTimer(StopWatchInterface **timer_interface) { // printf("sdkStartTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->start(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Stop the time with name \a name. Does not reset. //! @param name name of the timer to stop //////////////////////////////////////////////////////////////////////////////// inline bool sdkStopTimer(StopWatchInterface **timer_interface) { // printf("sdkStopTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->stop(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Resets the timer's counter. //! @param name name of the timer to reset. //////////////////////////////////////////////////////////////////////////////// inline bool sdkResetTimer(StopWatchInterface **timer_interface) { // printf("sdkResetTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->reset(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Return the average time for timer execution as the total time //! for the timer dividied by the number of completed (stopped) runs the timer //! has made. //! Excludes the current running time if the timer is currently running. //! @param name name of the timer to return the time of //////////////////////////////////////////////////////////////////////////////// inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) { // printf("sdkGetAverageTimerValue called object %08x\n", (void // *)*timer_interface); if (*timer_interface) { return (*timer_interface)->getAverageTime(); } else { return 0.0f; } } //////////////////////////////////////////////////////////////////////////////// //! Total execution time for the timer over all runs since the last reset //! or timer creation. //! @param name name of the timer to obtain the value of. 
//////////////////////////////////////////////////////////////////////////////// inline float sdkGetTimerValue(StopWatchInterface **timer_interface) { // printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface); if (*timer_interface) { return (*timer_interface)->getTime(); } else { return 0.0f; } } /* * Options * */ #define GAMMA 1.4f #define iterations 2000 // #ifndef block_length // #define block_length 192 // #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f /* * not options */ #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE_0 RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE_0 RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_0 RD_WG_SIZE #else #define BLOCK_SIZE_0 192 #endif #ifdef RD_WG_SIZE_1_0 #define BLOCK_SIZE_1 RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_1 RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_1 RD_WG_SIZE #else #define BLOCK_SIZE_1 192 #endif #ifdef RD_WG_SIZE_2_0 #define BLOCK_SIZE_2 RD_WG_SIZE_2_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_2 RD_WG_SIZE_2 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_2 RD_WG_SIZE #else #define BLOCK_SIZE_2 192 #endif #ifdef RD_WG_SIZE_3_0 #define BLOCK_SIZE_3 RD_WG_SIZE_3_0 #elif defined(RD_WG_SIZE_3) #define BLOCK_SIZE_3 RD_WG_SIZE_3 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_3 RD_WG_SIZE #else #define BLOCK_SIZE_3 192 #endif #ifdef RD_WG_SIZE_4_0 #define BLOCK_SIZE_4 RD_WG_SIZE_4_0 #elif defined(RD_WG_SIZE_4) #define BLOCK_SIZE_4 RD_WG_SIZE_4 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_4 RD_WG_SIZE #else #define BLOCK_SIZE_4 192 #endif // #if block_length > 128 // #warning "the kernels may fail too launch on some systems if the block length // is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM + NDIM) #define NVAR (VAR_DENSITY_ENERGY + 1) #define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString(err)); exit(-1); } } #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err)); exit(-1); } } /* * Generic functions */ template <typename T> T *alloc(int N) { T *t; checkCudaErrors(cudaMalloc((void **)&t, sizeof(T) * N)); return t; } template <typename T> void dealloc(T *array) { checkCudaErrors(cudaFree((void *)array)); } template <typename T> void copy(T *dst, T *src, int N) { checkCudaErrors(cudaMemcpy((void *)dst, (void *)src, N * sizeof(T), cudaMemcpyDeviceToDevice)); } template <typename T> void upload(T *dst, T *src, int N) { checkCudaErrors(cudaMemcpy((void *)dst, (void *)src, N * sizeof(T), cudaMemcpyHostToDevice)); } template <typename T> void download(T *dst, T *src, int N) { checkCudaErrors(cudaMemcpy((void *)dst, (void *)src, N * sizeof(T), cudaMemcpyDeviceToHost)); } void dump(float *variables, int nel, int nelr) { float *h_variables = new float[nelr * NVAR]; download(h_variables, variables, nelr * NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for (int i = 0; i < nel; i++) file << 
h_variables[i + VAR_DENSITY * nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for (int i = 0; i < nel; i++) { for (int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM + j) * nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for (int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY * nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_flux_contribution_momentum_x[1]; __constant__ float3 ff_flux_contribution_momentum_y[1]; __constant__ float3 ff_flux_contribution_momentum_z[1]; __constant__ float3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float *variables) { const int i = (blockDim.x * blockIdx.x + threadIdx.x); for (int j = 0; j < NVAR; j++) variables[i + j * nelr] = ff_variable[j]; } void initialize_variables(int nelr, float *variables) { dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1); cuda_initialize_variables<<<Dg, Db>>>(nelr, variables); getLastCudaError("initialize_variables failed"); } __device__ __host__ inline void compute_flux_contribution( float &density, float3 &momentum, float &density_energy, float &pressure, float3 &velocity, float3 &fc_momentum_x, float3 &fc_momentum_y, float3 &fc_momentum_z, float3 &fc_density_energy) { fc_momentum_x.x = velocity.x * momentum.x + pressure; fc_momentum_x.y = velocity.x * momentum.y; fc_momentum_x.z = velocity.x * momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y * momentum.y + pressure; fc_momentum_y.z = velocity.y * momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z * momentum.z + pressure; float de_p = density_energy + pressure; fc_density_energy.x = velocity.x * de_p; fc_density_energy.y = velocity.y * de_p; fc_density_energy.z = velocity.z * de_p; } __device__ inline void compute_velocity(float &density, float3 &momentum, float3 &velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3 &velocity) { return velocity.x * velocity.x + velocity.y * velocity.y + velocity.z * velocity.z; } __device__ inline float compute_pressure(float &density, float &density_energy, float &speed_sqd) { return (float(GAMMA) - float(1.0f)) * (density_energy - float(0.5f) * density * speed_sqd); } __device__ inline float compute_speed_of_sound(float &density, float &pressure) { return sqrtf(float(GAMMA) * pressure / density); } __global__ void cuda_compute_step_factor(int nelr, float *variables, float *areas, float *step_factors) { const int i = (blockDim.x * blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY * nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM + 0) * nelr]; momentum.y = variables[i + (VAR_MOMENTUM + 1) * nelr]; momentum.z = variables[i + (VAR_MOMENTUM + 2) * nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY * nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... 
but when we do time // stepping, this later would need to be divided by the area, so we just do it // all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float *variables, float *areas, float *step_factors) { dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2); cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); getLastCudaError("compute_step_factor failed"); } /* * * */ __global__ void cuda_compute_flux(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x * blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY * nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM + 0) * nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM + 1) * nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM + 2) * nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY * nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution( density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; for (j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j * nelr]; normal.x = normals[i + (j + 0 * NNB) * nelr]; normal.y = normals[i + (j + 1 * NNB) * nelr]; normal.z = normals[i + (j + 2 * NNB) * nelr]; normal_len = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z); if (nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY * nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM + 0) * nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM + 1) * nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM + 2) * nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY * nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution( density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len * smoothing_coefficient * float(0.5f) * (speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor * (density_i - 
density_nb); flux_i_density_energy += factor * (density_energy_i - density_energy_nb); flux_i_momentum.x += factor * (momentum_i.x - momentum_nb.x); flux_i_momentum.y += factor * (momentum_i.y - momentum_nb.y); flux_i_momentum.z += factor * (momentum_i.z - momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f) * normal.x; flux_i_density += factor * (momentum_nb.x + momentum_i.x); flux_i_density_energy += factor * (flux_contribution_nb_density_energy.x + flux_contribution_i_density_energy.x); flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.x + flux_contribution_i_momentum_z.x); factor = float(0.5f) * normal.y; flux_i_density += factor * (momentum_nb.y + momentum_i.y); flux_i_density_energy += factor * (flux_contribution_nb_density_energy.y + flux_contribution_i_density_energy.y); flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.y + flux_contribution_i_momentum_z.y); factor = float(0.5f) * normal.z; flux_i_density += factor * (momentum_nb.z + momentum_i.z); flux_i_density_energy += factor * (flux_contribution_nb_density_energy.z + flux_contribution_i_density_energy.z); flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.z + flux_contribution_i_momentum_z.z); } else if (nb == -1) // a wing boundary { flux_i_momentum.x += normal.x * pressure_i; flux_i_momentum.y += normal.y * pressure_i; flux_i_momentum.z += normal.z * pressure_i; } else if (nb == -2) // a far field boundary { factor = float(0.5f) * normal.x; flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 0] + momentum_i.x); flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].x + flux_contribution_i_density_energy.x); flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f) * normal.y; flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 1] + momentum_i.y); flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].y + flux_contribution_i_density_energy.y); flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f) * normal.z; flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 2] + momentum_i.z); flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].z + flux_contribution_i_density_energy.z); flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].z + 
flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY * nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM + 0) * nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM + 1) * nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM + 2) * nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY * nelr] = flux_i_density_energy; } void compute_flux(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes) { dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3); cuda_compute_flux<<<Dg, Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes); getLastCudaError("compute_flux failed"); } __global__ void cuda_time_step(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes) { const int i = (blockDim.x * blockIdx.x + threadIdx.x); float factor = step_factors[i] / float(RK + 1 - j); variables[i + VAR_DENSITY * nelr] = old_variables[i + VAR_DENSITY * nelr] + factor * fluxes[i + VAR_DENSITY * nelr]; variables[i + VAR_DENSITY_ENERGY * nelr] = old_variables[i + VAR_DENSITY_ENERGY * nelr] + factor * fluxes[i + VAR_DENSITY_ENERGY * nelr]; variables[i + (VAR_MOMENTUM + 0) * nelr] = old_variables[i + (VAR_MOMENTUM + 0) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 0) * nelr]; variables[i + (VAR_MOMENTUM + 1) * nelr] = old_variables[i + (VAR_MOMENTUM + 1) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 1) * nelr]; variables[i + (VAR_MOMENTUM + 2) * nelr] = old_variables[i + (VAR_MOMENTUM + 2) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 2) * nelr]; } void time_step(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes) { dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4); cuda_time_step<<<Dg, Db>>>(j, nelr, old_variables, variables, step_factors, fluxes); getLastCudaError("update failed"); } /* * Main function */ int main(int argc, char **argv) { printf("WG size of kernel:initialize = %d, WG size of " "kernel:compute_step_factor = %d, WG size of kernel:compute_flux = " "%d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4); if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char *data_file_name = argv[1]; cudaDeviceProp prop; int dev; checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaGetDevice(&dev)); checkCudaErrors(cudaGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA * ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach) * ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed * float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed * float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM + 0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM + 1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM + 2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY] * (float(0.5f) * (ff_speed * ff_speed)) + (ff_pressure / float(GAMMA - 1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable + VAR_MOMENTUM + 0); h_ff_momentum.y = 
*(h_ff_variable + VAR_MOMENTUM + 1); h_ff_momentum.z = *(h_ff_variable + VAR_MOMENTUM + 2); float3 h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu checkCudaErrors( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR * sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3))); checkCudaErrors(cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3))); checkCudaErrors(cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3))); checkCudaErrors(cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3))); } int nel; int nelr; // read in domain geometry float *areas; int *elements_surrounding_elements; float *normals; { std::ifstream file(data_file_name); file >> nel; nelr = BLOCK_SIZE_0 * ((nel / BLOCK_SIZE_0) + std::min(1, nel % BLOCK_SIZE_0)); float *h_areas = new float[nelr]; int *h_elements_surrounding_elements = new int[nelr * NNB]; float *h_normals = new float[nelr * NDIM * NNB]; // read in data for (int i = 0; i < nel; i++) { file >> h_areas[i]; for (int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j * nelr]; if (h_elements_surrounding_elements[i + j * nelr] < 0) h_elements_surrounding_elements[i + j * nelr] = -1; h_elements_surrounding_elements[i + j * nelr]--; // it's coming in with // Fortran numbering for (int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k * NNB) * nelr]; h_normals[i + (j + k * NNB) * nelr] = -h_normals[i + (j + k * NNB) * nelr]; } } } // fill in remaining data int last = nel - 1; for (int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for (int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j * nelr] = h_elements_surrounding_elements[last + j * nelr]; for (int k = 0; k < NDIM; k++) h_normals[last + (j + k * NNB) * nelr] = h_normals[last + (j + k * NNB) * nelr]; } } areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr * NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr * NNB); normals = alloc<float>(nelr * NDIM * NNB); upload<float>(normals, h_normals, nelr * NDIM * NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions float *variables = alloc<float>(nelr * NVAR); initialize_variables(nelr, variables); float *old_variables = alloc<float>(nelr * NVAR); float *fluxes = alloc<float>(nelr * NVAR); float *step_factors = alloc<float>(nelr); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); cudaMemset((void *)step_factors, 0, sizeof(float) * nelr); // make sure CUDA isn't still doing something before we start timing cudaDeviceSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "Starting..." 
<< std::endl; StopWatchInterface *timer = 0; // unsigned int timer = 0; // CUT_SAFE_CALL( cutCreateTimer( &timer)); // CUT_SAFE_CALL( cutStartTimer( timer)); sdkCreateTimer(&timer); sdkStartTimer(&timer); // Begin iterations for (int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr * NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); getLastCudaError("compute_step_factor failed"); for (int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); getLastCudaError("compute_flux failed"); time_step(j, nelr, old_variables, variables, step_factors, fluxes); getLastCudaError("time_step failed"); } } cudaDeviceSynchronize(); // CUT_SAFE_CALL( cutStopTimer(timer) ); sdkStopTimer(&timer); std::cout << (sdkGetAverageTimerValue(&timer) / 1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); std::cout << "Done..." << std::endl; return 0; }
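// --- Illustrative sketch, not part of the original sources ---
// The solver above pads the element count nel up to nelr, a multiple of
// BLOCK_SIZE_0, so every kernel can be launched with an exact grid
// (nelr / block_size blocks) and no per-thread bounds check. A minimal helper
// with the same rounding, assuming std::min is available as in the file above:
static inline int pad_to_block(int nel, int block) {
  // e.g. nel = 97046, block = 192 -> 97152 (506 full blocks of 192 threads)
  return block * ((nel / block) + std::min(1, nel % block));
}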
13f689440bb25ea1c1a99915b5be62f2e8dd073c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "myset.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned long long *p = NULL; hipMalloc(&p, XSIZE*YSIZE); unsigned long long v = 1; long long n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( myset), dim3(gridBlock),dim3(threadBlock), 0, 0, p,v,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( myset), dim3(gridBlock),dim3(threadBlock), 0, 0, p,v,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( myset), dim3(gridBlock),dim3(threadBlock), 0, 0, p,v,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
13f689440bb25ea1c1a99915b5be62f2e8dd073c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "myset.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned long long *p = NULL; cudaMalloc(&p, XSIZE*YSIZE); unsigned long long v = 1; long long n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); myset<<<gridBlock,threadBlock>>>(p,v,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { myset<<<gridBlock,threadBlock>>>(p,v,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { myset<<<gridBlock,threadBlock>>>(p,v,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
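// --- Illustrative sketch, not part of the original sources ---
// The two while-loops above round XSIZE/YSIZE up to the next multiple of the
// block dimensions before dividing; the usual ceiling-division form yields the
// same grid in one step (hypothetical helper name):
static dim3 make_grid(int xsize, int ysize, int blockx, int blocky) {
  return dim3((xsize + blockx - 1) / blockx, (ysize + blocky - 1) / blocky);
}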
53ba1d8a9fdd792a7ce02b6555a7e897bbcbdbeb.hip
// !!! This is a file automatically generated by hipify!!! #include <assert.h> #include <stdlib.h> #include <stdint.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> // includes, project #include <hip/hip_runtime.h> #include "../../engines/cuda/utils.hh" #include "IPlookup_kernel.hh" #define IGNORED_IP 0xFFffFFffu /* Compatibility definitions. */ #include "../../engines/cuda/compat.hh" extern "C" { /* The index is given by the order in get_used_datablocks(). */ #define dbid_ipv4_dest_addrs_d (0) #define dbid_ipv4_lookup_results_d (1) __device__ uint32_t ntohl(uint32_t n) { return ((n & 0xff000000) >> 24) | ((n & 0x00ff0000) >> 8) | \ ((n & 0x0000ff00) << 8) | ((n & 0x000000ff) << 24); } /* The GPU kernel. */ __global__ void ipv4_route_lookup_cuda( struct datablock_kernel_arg *datablocks, uint32_t count, uint16_t *batch_ids, uint16_t *item_ids, uint8_t *checkbits_d, uint16_t* __restrict__ TBL24_d, uint16_t* __restrict__ TBLlong_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < count) { uint16_t batch_idx = batch_ids[idx]; uint16_t item_idx = item_ids[idx]; struct datablock_kernel_arg *db_dest_addrs = &datablocks[dbid_ipv4_dest_addrs_d]; struct datablock_kernel_arg *db_results = &datablocks[dbid_ipv4_lookup_results_d]; uint32_t daddr = ((uint32_t*) db_dest_addrs->buffer_bases_in[batch_idx])[item_idx]; uint16_t *lookup_result = &((uint16_t *) db_results->buffer_bases_out[batch_idx])[item_idx]; if (daddr == IGNORED_IP) { *lookup_result = 0; } else { daddr = ntohl(daddr); uint16_t temp_dest = TBL24_d[daddr >> 8]; if (temp_dest & 0x8000u) { int index2 = (((uint32_t)(temp_dest & 0x7fff)) << 8) + (daddr & 0xff); temp_dest = TBLlong_d[index2]; } *lookup_result = temp_dest; } } __syncthreads(); if (threadIdx.x == 0 && checkbits_d != NULL) { checkbits_d[blockIdx.x] = 1; } } } void *nba::ipv4_route_lookup_get_cuda_kernel() { return reinterpret_cast<void *> (ipv4_route_lookup_cuda); } // vim: ts=8 sts=4 sw=4 et
53ba1d8a9fdd792a7ce02b6555a7e897bbcbdbeb.cu
#include <assert.h> #include <stdlib.h> #include <stdint.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> // includes, project #include <cuda.h> #include "../../engines/cuda/utils.hh" #include "IPlookup_kernel.hh" #define IGNORED_IP 0xFFffFFffu /* Compatibility definitions. */ #include "../../engines/cuda/compat.hh" extern "C" { /* The index is given by the order in get_used_datablocks(). */ #define dbid_ipv4_dest_addrs_d (0) #define dbid_ipv4_lookup_results_d (1) __device__ uint32_t ntohl(uint32_t n) { return ((n & 0xff000000) >> 24) | ((n & 0x00ff0000) >> 8) | \ ((n & 0x0000ff00) << 8) | ((n & 0x000000ff) << 24); } /* The GPU kernel. */ __global__ void ipv4_route_lookup_cuda( struct datablock_kernel_arg *datablocks, uint32_t count, uint16_t *batch_ids, uint16_t *item_ids, uint8_t *checkbits_d, uint16_t* __restrict__ TBL24_d, uint16_t* __restrict__ TBLlong_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < count) { uint16_t batch_idx = batch_ids[idx]; uint16_t item_idx = item_ids[idx]; struct datablock_kernel_arg *db_dest_addrs = &datablocks[dbid_ipv4_dest_addrs_d]; struct datablock_kernel_arg *db_results = &datablocks[dbid_ipv4_lookup_results_d]; uint32_t daddr = ((uint32_t*) db_dest_addrs->buffer_bases_in[batch_idx])[item_idx]; uint16_t *lookup_result = &((uint16_t *) db_results->buffer_bases_out[batch_idx])[item_idx]; if (daddr == IGNORED_IP) { *lookup_result = 0; } else { daddr = ntohl(daddr); uint16_t temp_dest = TBL24_d[daddr >> 8]; if (temp_dest & 0x8000u) { int index2 = (((uint32_t)(temp_dest & 0x7fff)) << 8) + (daddr & 0xff); temp_dest = TBLlong_d[index2]; } *lookup_result = temp_dest; } } __syncthreads(); if (threadIdx.x == 0 && checkbits_d != NULL) { checkbits_d[blockIdx.x] = 1; } } } void *nba::ipv4_route_lookup_get_cuda_kernel() { return reinterpret_cast<void *> (ipv4_route_lookup_cuda); } // vim: ts=8 sts=4 sw=4 et
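/* --- Illustrative sketch, not part of the original sources ---
 * The kernel above is a DIR-24-8 style lookup: TBL24 holds one 16-bit entry
 * per /24 prefix (indexed by the top 24 address bits), and entries with the
 * high bit set redirect into TBLlong, indexed by the remaining 8 bits.
 * A host-side reference with the same semantics (daddr already in host byte
 * order, tables laid out exactly like TBL24_d/TBLlong_d) might look like: */
static inline uint16_t ref_route_lookup(uint32_t daddr, const uint16_t *TBL24,
                                        const uint16_t *TBLlong)
{
    uint16_t dest = TBL24[daddr >> 8];
    if (dest & 0x8000u) {
        int index2 = (((uint32_t)(dest & 0x7fff)) << 8) + (daddr & 0xff);
        dest = TBLlong[index2];
    }
    return dest;
}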
aa7dd99429d258f428dd990be712f214ad3161ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cutil_inline.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "../benchmark_common.h" #include "CudaRandomAccess.h" /* Number of updates to table (suggested: 4x number of table entries) */ #define UP_REPEAT 4 #define NUPDATE (UP_REPEAT * TableSize) #define THREAD_BLOCK 128 #define NTHREAD_BLOCKS 512 #define MAX_VL 128 inline double RTSEC() { struct timeval tv; gettimeofday(&tv, NULL); return ((double)tv.tv_sec + ((double)tv.tv_usec) / 1000000); } // int main() int main_gups(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { Params_t params; double GUPs; int failure; hipSetDevice(cutGetMaxGflopsDeviceId()); params.HPLMaxProcMem = TOTAL_MEM; params.outFname = "RA_output"; HPCC_RandomAccess(&params, 0, &GUPs, &failure, stream_app, mutexapp, flag); return 0; } /* Utility routine to start random number generator at Nth step */ u64Int HPCC_starts(s64Int n) { int i, j; u64Int m2[64]; u64Int temp, ran; while (n < 0) n += PERIOD; while (n > PERIOD) n -= PERIOD; if (n == 0) return 0x1; temp = 0x1; for (i = 0; i < 64; i++) { m2[i] = temp; temp = (temp << 1) ^ ((s64Int)temp < 0 ? POLY : 0); temp = (temp << 1) ^ ((s64Int)temp < 0 ? POLY : 0); } for (i = 62; i >= 0; i--) if ((n >> i) & 1) break; ran = 0x2; while (i > 0) { temp = 0; for (j = 0; j < 64; j++) if ((ran >> j) & 1) temp ^= m2[j]; ran = temp; i -= 1; if ((n >> i) & 1) ran = (ran << 1) ^ ((s64Int)ran < 0 ? POLY : 0); } return ran; } #ifdef TORCH_HIP_VERSION // trick to let us access the 64-bit simulator xor intrinsic #define NF_ATOMIC #ifdef NF_ATOMIC extern "C" __device__ __noinline__ void _Z_intrinsic_atom_global_xor_nf_i64( void* address, unsigned long long int data) { atomicXor(0 + (unsigned int*)address, *(0 + (unsigned int*)&data)); atomicXor(1 + (unsigned int*)address, *(1 + (unsigned int*)&data)); } #else extern "C" __device__ __noinline__ unsigned long long int _Z_intrinsic_atom_global_xor_i64(void* address, unsigned long long int data) { unsigned long long int previous = *(unsigned long long int*)address; atomicXor(0 + (unsigned int*)address, *(0 + (unsigned int*)&data)); atomicXor(1 + (unsigned int*)address, *(1 + (unsigned int*)&data)); return previous; } #endif // CUDA kernel __global__ static void RandomAccessUpdate(u64Int TableSize, u64Int* Table, u64Int* starts) { u64Int i; u64Int ran; /* Current random number */ /* Perform updates to main table. The scalar equivalent is: * * u64Int ran; * ran = 1; * for (i=0; i<NUPDATE; i++) { * ran = (ran << 1) ^ (((s64Int) ran < 0) ? POLY : 0); * table[ran & (TableSize-1)] ^= ran; * } */ // ran = HPCC_starts ((NUPDATE/THREAD_BLOCK/NTHREAD_BLOCKS) * (blockIdx.x * // blockDim.x + threadIdx.x)); ran = starts[blockIdx.x * blockDim.x + threadIdx.x]; for (i = 0; i < NUPDATE / THREAD_BLOCK / NTHREAD_BLOCKS; i++) { ran = (ran << 1) ^ ((s64Int)ran < 0 ? 
POLY : 0); u64Int index = ran & (TableSize - 1); #ifdef ATOMIC_64_BIT_XOR #ifdef ATOMIC_INTRINSIC #ifdef NF_ATOMIC _Z_intrinsic_atom_global_xor_nf_i64(&Table[index], ran); #else _Z_intrinsic_atom_global_xor_i64(&Table[index], ran); #endif #else atomicXor(&Table[index], ran); #endif #else atomicXor(0 + (unsigned int*)&Table[index], *(0 + (unsigned int*)&ran)); atomicXor(1 + (unsigned int*)&Table[index], *(1 + (unsigned int*)&ran)); #endif } } #else #ifdef C_VECTOR_VERSION // MAX_VL element vector version static void RandomAccessUpdate(u64Int TableSize, u64Int* Table) { u64Int i; u64Int ran[MAX_VL]; /* Current random numbers */ int j; /* Perform updates to main table. The scalar equivalent is: * * u64Int ran; * ran = 1; * for (i=0; i<NUPDATE; i++) { * ran = (ran << 1) ^ (((s64Int) ran < 0) ? POLY : 0); * table[ran & (TableSize-1)] ^= ran; * } */ for (j = 0; j < MAX_VL; j++) ran[j] = HPCC_starts((NUPDATE / MAX_VL) * j); for (i = 0; i < NUPDATE / MAX_VL; i++) { for (j = 0; j < MAX_VL; j++) { ran[j] = (ran[j] << 1) ^ ((s64Int)ran[j] < 0 ? POLY : 0); Table[ran[j] & (TableSize - 1)] ^= ran[j]; } } } #else // strict scalar version static void RandomAccessUpdate(u64Int TableSize, u64Int* Table) { u64Int i; u64Int ran; /* Current random number */ /* Perform updates to main table. The scalar equivalent is: * * u64Int ran; * ran = 1; * for (i=0; i<NUPDATE; i++) { * ran = (ran << 1) ^ (((s64Int) ran < 0) ? POLY : 0); * table[ran & (TableSize-1)] ^= ran; * } */ ran = 0x1; for (i = 0; i < NUPDATE; i++) { ran = (ran << 1) ^ ((s64Int)ran < 0 ? POLY : 0); Table[ran & (TableSize - 1)] ^= ran; } } #endif #endif int HPCC_RandomAccess(Params_t* params, int doIO, double* GUPs, int* failure, hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { u64Int i; u64Int temp; double cputime; /* CPU time to update table */ double realtime; /* Real time to update table */ double totalMem; u64Int* Table; u64Int logTableSize, TableSize; FILE* outFile = NULL; if (doIO) { outFile = fopen(params->outFname, "a"); if (!outFile) { outFile = stderr; fprintf(outFile, "Cannot open output file.\n"); return 1; } } /* calculate local memory per node for the update table */ totalMem = params->HPLMaxProcMem; totalMem /= sizeof(u64Int); /* calculate the size of update array (must be a power of 2) */ for (totalMem *= 0.5, logTableSize = 0, TableSize = 1; totalMem >= 1.0; totalMem *= 0.5, logTableSize++, TableSize <<= 1) ; /* EMPTY */ Table = (typeof(Table))malloc(sizeof(u64Int) * TableSize); if (!Table) { if (doIO) { fprintf(outFile, "Failed to allocate memory for the update table (" FSTR64 ").\n", TableSize); fclose(outFile); } return 1; } /* Print parameters for run */ if (doIO) { fprintf(outFile, "------------------------------------------------------------\n"); #ifdef TORCH_HIP_VERSION fprintf(outFile, "GPU version\n"); fprintf(outFile, "CTA_SIZE = %d, CTAS = %d\n", THREAD_BLOCK, NTHREAD_BLOCKS); hipDeviceProp_t deviceProp; cutilSafeCall(hipGetDeviceProperties(&deviceProp, 0)); fprintf(outFile, "Device Name = %s\n", deviceProp.name); #else #ifdef C_VECTOR_VERSION fprintf(outFile, "CPU Vector version\n"); fprintf(outFile, "Vector length\n"); #else fprintf(outFile, "CPU non-Vector version\n"); #endif #endif fprintf(outFile, "Main table size = 2^" FSTR64 " = " FSTR64 " words\n", logTableSize, TableSize); fprintf(outFile, "Number of updates = " FSTR64 "\n", NUPDATE); } /* Initialize main table */ for (i = 0; i < TableSize; i++) Table[i] = i; #ifdef TORCH_HIP_VERSION u64Int starts[THREAD_BLOCK * NTHREAD_BLOCKS]; for (i = 0; i < 
(THREAD_BLOCK * NTHREAD_BLOCKS); i++) starts[i] = HPCC_starts((NUPDATE / THREAD_BLOCK / NTHREAD_BLOCKS) * i); #endif /* Begin timing here */ #if 0 cputime = -CPUSEC(); #else cputime = 0; #endif realtime = -RTSEC(); #ifdef TORCH_HIP_VERSION int size = sizeof(u64Int) * TableSize; u64Int* d_Table; u64Int* d_starts; cutilSafeCall(hipMalloc(&d_Table, size)); printf("sizeof(starts) = %lld", sizeof(starts)); cutilSafeCall(hipMalloc(&d_starts, sizeof(starts))); cutilSafeCall(hipMemcpyAsync(d_Table, Table, size, hipMemcpyHostToDevice, stream_app)); cutilSafeCall(hipMemcpyAsync(d_starts, starts, sizeof(starts), hipMemcpyHostToDevice, stream_app)); hipLaunchKernelGGL(( RandomAccessUpdate), dim3(NTHREAD_BLOCKS), dim3(THREAD_BLOCK), 0, stream_app, TableSize, d_Table, d_starts); cutilSafeCall(hipGetLastError()); cutilSafeCall(hipMemcpyAsync(Table, d_Table, size, hipMemcpyDeviceToHost, stream_app)); #else RandomAccessUpdate(TableSize, Table); #endif pthread_mutex_unlock(mutexapp); // cutilSafeCall( hipStreamSynchronize(stream_app) ); if (flag) cutilSafeCall(hipStreamSynchronize(stream_app)); #if 0 /* End timed section */ cputime += CPUSEC(); #endif realtime += RTSEC(); /* make sure no division by zero */ *GUPs = (realtime > 0.0 ? 1.0 / realtime : -1.0); *GUPs *= 1e-9 * NUPDATE; /* Print timing results */ if (doIO) { fprintf(outFile, "CPU time used = %.6f seconds\n", cputime); fprintf(outFile, "Real time used = %.6f seconds\n", realtime); fprintf(outFile, "%.9f Billion(10^9) Updates per second [GUP/s]\n", *GUPs); } /* Verification of results (in serial or "safe" mode; optional) */ temp = 0x1; for (i = 0; i < NUPDATE; i++) { temp = (temp << 1) ^ (((s64Int)temp < 0) ? POLY : 0); Table[temp & (TableSize - 1)] ^= temp; } temp = 0; for (i = 0; i < TableSize; i++) if (Table[i] != i) temp++; if (doIO) { fprintf(outFile, "Found " FSTR64 " errors in " FSTR64 " locations (%s).\n", temp, TableSize, (temp <= 0.01 * TableSize) ? "passed" : "failed"); } if (temp <= 0.01 * TableSize) *failure = 0; else *failure = 1; free(Table); #ifdef TORCH_HIP_VERSION cutilSafeCall(hipFree(d_Table)); #endif if (doIO) { fflush(outFile); fclose(outFile); } return 0; }
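/* --- Illustrative sketch, not part of the original sources ---
 * HPCC_starts(n) above uses GF(2) matrix exponentiation to jump straight to
 * the n-th state of the update stream. Assuming the same u64Int/s64Int/POLY
 * definitions from the included headers, the direct (slow) equivalent is: */
static u64Int slow_starts(s64Int n) {
  u64Int ran = 0x1;
  for (s64Int k = 0; k < n; k++)
    ran = (ran << 1) ^ ((s64Int)ran < 0 ? POLY : 0);
  return ran;  // matches HPCC_starts(n) for 0 <= n < PERIOD
}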
aa7dd99429d258f428dd990be712f214ad3161ac.cu
#include <cutil_inline.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "../benchmark_common.h" #include "CudaRandomAccess.h" /* Number of updates to table (suggested: 4x number of table entries) */ #define UP_REPEAT 4 #define NUPDATE (UP_REPEAT * TableSize) #define THREAD_BLOCK 128 #define NTHREAD_BLOCKS 512 #define MAX_VL 128 inline double RTSEC() { struct timeval tv; gettimeofday(&tv, NULL); return ((double)tv.tv_sec + ((double)tv.tv_usec) / 1000000); } // int main() int main_gups(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { Params_t params; double GUPs; int failure; cudaSetDevice(cutGetMaxGflopsDeviceId()); params.HPLMaxProcMem = TOTAL_MEM; params.outFname = "RA_output"; HPCC_RandomAccess(&params, 0, &GUPs, &failure, stream_app, mutexapp, flag); return 0; } /* Utility routine to start random number generator at Nth step */ u64Int HPCC_starts(s64Int n) { int i, j; u64Int m2[64]; u64Int temp, ran; while (n < 0) n += PERIOD; while (n > PERIOD) n -= PERIOD; if (n == 0) return 0x1; temp = 0x1; for (i = 0; i < 64; i++) { m2[i] = temp; temp = (temp << 1) ^ ((s64Int)temp < 0 ? POLY : 0); temp = (temp << 1) ^ ((s64Int)temp < 0 ? POLY : 0); } for (i = 62; i >= 0; i--) if ((n >> i) & 1) break; ran = 0x2; while (i > 0) { temp = 0; for (j = 0; j < 64; j++) if ((ran >> j) & 1) temp ^= m2[j]; ran = temp; i -= 1; if ((n >> i) & 1) ran = (ran << 1) ^ ((s64Int)ran < 0 ? POLY : 0); } return ran; } #ifdef CUDA_VERSION // trick to let us access the 64-bit simulator xor intrinsic #define NF_ATOMIC #ifdef NF_ATOMIC extern "C" __device__ __noinline__ void _Z_intrinsic_atom_global_xor_nf_i64( void* address, unsigned long long int data) { atomicXor(0 + (unsigned int*)address, *(0 + (unsigned int*)&data)); atomicXor(1 + (unsigned int*)address, *(1 + (unsigned int*)&data)); } #else extern "C" __device__ __noinline__ unsigned long long int _Z_intrinsic_atom_global_xor_i64(void* address, unsigned long long int data) { unsigned long long int previous = *(unsigned long long int*)address; atomicXor(0 + (unsigned int*)address, *(0 + (unsigned int*)&data)); atomicXor(1 + (unsigned int*)address, *(1 + (unsigned int*)&data)); return previous; } #endif // CUDA kernel __global__ static void RandomAccessUpdate(u64Int TableSize, u64Int* Table, u64Int* starts) { u64Int i; u64Int ran; /* Current random number */ /* Perform updates to main table. The scalar equivalent is: * * u64Int ran; * ran = 1; * for (i=0; i<NUPDATE; i++) { * ran = (ran << 1) ^ (((s64Int) ran < 0) ? POLY : 0); * table[ran & (TableSize-1)] ^= ran; * } */ // ran = HPCC_starts ((NUPDATE/THREAD_BLOCK/NTHREAD_BLOCKS) * (blockIdx.x * // blockDim.x + threadIdx.x)); ran = starts[blockIdx.x * blockDim.x + threadIdx.x]; for (i = 0; i < NUPDATE / THREAD_BLOCK / NTHREAD_BLOCKS; i++) { ran = (ran << 1) ^ ((s64Int)ran < 0 ? POLY : 0); u64Int index = ran & (TableSize - 1); #ifdef ATOMIC_64_BIT_XOR #ifdef ATOMIC_INTRINSIC #ifdef NF_ATOMIC _Z_intrinsic_atom_global_xor_nf_i64(&Table[index], ran); #else _Z_intrinsic_atom_global_xor_i64(&Table[index], ran); #endif #else atomicXor(&Table[index], ran); #endif #else atomicXor(0 + (unsigned int*)&Table[index], *(0 + (unsigned int*)&ran)); atomicXor(1 + (unsigned int*)&Table[index], *(1 + (unsigned int*)&ran)); #endif } } #else #ifdef C_VECTOR_VERSION // MAX_VL element vector version static void RandomAccessUpdate(u64Int TableSize, u64Int* Table) { u64Int i; u64Int ran[MAX_VL]; /* Current random numbers */ int j; /* Perform updates to main table. 
The scalar equivalent is: * * u64Int ran; * ran = 1; * for (i=0; i<NUPDATE; i++) { * ran = (ran << 1) ^ (((s64Int) ran < 0) ? POLY : 0); * table[ran & (TableSize-1)] ^= ran; * } */ for (j = 0; j < MAX_VL; j++) ran[j] = HPCC_starts((NUPDATE / MAX_VL) * j); for (i = 0; i < NUPDATE / MAX_VL; i++) { for (j = 0; j < MAX_VL; j++) { ran[j] = (ran[j] << 1) ^ ((s64Int)ran[j] < 0 ? POLY : 0); Table[ran[j] & (TableSize - 1)] ^= ran[j]; } } } #else // strict scalar version static void RandomAccessUpdate(u64Int TableSize, u64Int* Table) { u64Int i; u64Int ran; /* Current random number */ /* Perform updates to main table. The scalar equivalent is: * * u64Int ran; * ran = 1; * for (i=0; i<NUPDATE; i++) { * ran = (ran << 1) ^ (((s64Int) ran < 0) ? POLY : 0); * table[ran & (TableSize-1)] ^= ran; * } */ ran = 0x1; for (i = 0; i < NUPDATE; i++) { ran = (ran << 1) ^ ((s64Int)ran < 0 ? POLY : 0); Table[ran & (TableSize - 1)] ^= ran; } } #endif #endif int HPCC_RandomAccess(Params_t* params, int doIO, double* GUPs, int* failure, cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { u64Int i; u64Int temp; double cputime; /* CPU time to update table */ double realtime; /* Real time to update table */ double totalMem; u64Int* Table; u64Int logTableSize, TableSize; FILE* outFile = NULL; if (doIO) { outFile = fopen(params->outFname, "a"); if (!outFile) { outFile = stderr; fprintf(outFile, "Cannot open output file.\n"); return 1; } } /* calculate local memory per node for the update table */ totalMem = params->HPLMaxProcMem; totalMem /= sizeof(u64Int); /* calculate the size of update array (must be a power of 2) */ for (totalMem *= 0.5, logTableSize = 0, TableSize = 1; totalMem >= 1.0; totalMem *= 0.5, logTableSize++, TableSize <<= 1) ; /* EMPTY */ Table = (typeof(Table))malloc(sizeof(u64Int) * TableSize); if (!Table) { if (doIO) { fprintf(outFile, "Failed to allocate memory for the update table (" FSTR64 ").\n", TableSize); fclose(outFile); } return 1; } /* Print parameters for run */ if (doIO) { fprintf(outFile, "------------------------------------------------------------\n"); #ifdef CUDA_VERSION fprintf(outFile, "GPU version\n"); fprintf(outFile, "CTA_SIZE = %d, CTAS = %d\n", THREAD_BLOCK, NTHREAD_BLOCKS); cudaDeviceProp deviceProp; cutilSafeCall(cudaGetDeviceProperties(&deviceProp, 0)); fprintf(outFile, "Device Name = %s\n", deviceProp.name); #else #ifdef C_VECTOR_VERSION fprintf(outFile, "CPU Vector version\n"); fprintf(outFile, "Vector length\n"); #else fprintf(outFile, "CPU non-Vector version\n"); #endif #endif fprintf(outFile, "Main table size = 2^" FSTR64 " = " FSTR64 " words\n", logTableSize, TableSize); fprintf(outFile, "Number of updates = " FSTR64 "\n", NUPDATE); } /* Initialize main table */ for (i = 0; i < TableSize; i++) Table[i] = i; #ifdef CUDA_VERSION u64Int starts[THREAD_BLOCK * NTHREAD_BLOCKS]; for (i = 0; i < (THREAD_BLOCK * NTHREAD_BLOCKS); i++) starts[i] = HPCC_starts((NUPDATE / THREAD_BLOCK / NTHREAD_BLOCKS) * i); #endif /* Begin timing here */ #if 0 cputime = -CPUSEC(); #else cputime = 0; #endif realtime = -RTSEC(); #ifdef CUDA_VERSION int size = sizeof(u64Int) * TableSize; u64Int* d_Table; u64Int* d_starts; cutilSafeCall(cudaMalloc(&d_Table, size)); printf("sizeof(starts) = %lld", sizeof(starts)); cutilSafeCall(cudaMalloc(&d_starts, sizeof(starts))); cutilSafeCall(cudaMemcpyAsync(d_Table, Table, size, cudaMemcpyHostToDevice, stream_app)); cutilSafeCall(cudaMemcpyAsync(d_starts, starts, sizeof(starts), cudaMemcpyHostToDevice, stream_app)); RandomAccessUpdate<<<NTHREAD_BLOCKS, 
THREAD_BLOCK, 0, stream_app>>>( TableSize, d_Table, d_starts); cutilSafeCall(cudaGetLastError()); cutilSafeCall(cudaMemcpyAsync(Table, d_Table, size, cudaMemcpyDeviceToHost, stream_app)); #else RandomAccessUpdate(TableSize, Table); #endif pthread_mutex_unlock(mutexapp); // cutilSafeCall( cudaStreamSynchronize(stream_app) ); if (flag) cutilSafeCall(cudaStreamSynchronize(stream_app)); #if 0 /* End timed section */ cputime += CPUSEC(); #endif realtime += RTSEC(); /* make sure no division by zero */ *GUPs = (realtime > 0.0 ? 1.0 / realtime : -1.0); *GUPs *= 1e-9 * NUPDATE; /* Print timing results */ if (doIO) { fprintf(outFile, "CPU time used = %.6f seconds\n", cputime); fprintf(outFile, "Real time used = %.6f seconds\n", realtime); fprintf(outFile, "%.9f Billion(10^9) Updates per second [GUP/s]\n", *GUPs); } /* Verification of results (in serial or "safe" mode; optional) */ temp = 0x1; for (i = 0; i < NUPDATE; i++) { temp = (temp << 1) ^ (((s64Int)temp < 0) ? POLY : 0); Table[temp & (TableSize - 1)] ^= temp; } temp = 0; for (i = 0; i < TableSize; i++) if (Table[i] != i) temp++; if (doIO) { fprintf(outFile, "Found " FSTR64 " errors in " FSTR64 " locations (%s).\n", temp, TableSize, (temp <= 0.01 * TableSize) ? "passed" : "failed"); } if (temp <= 0.01 * TableSize) *failure = 0; else *failure = 1; free(Table); #ifdef CUDA_VERSION cutilSafeCall(cudaFree(d_Table)); #endif if (doIO) { fflush(outFile); fclose(outFile); } return 0; }
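/* --- Illustrative sketch, not part of the original sources ---
 * The GUP/s figure printed above is just updates per wall-clock second scaled
 * to billions. Worked example: TableSize = 2^26 words gives
 * NUPDATE = 4 * 2^26 = 268435456; with realtime = 2.0 s that is
 * 1e-9 * 268435456 / 2.0 ~= 0.134 GUP/s. */
static inline double gups(double nupdate, double realtime_sec) {
  return realtime_sec > 0.0 ? 1e-9 * nupdate / realtime_sec : -1.0;
}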
473007e9f085a2b76f245f4027330975aa98d166.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define MYDEBUG #ifdef MYDEBUG #define DEBUG_PRINT printf("here: %d\n", __LINE__); fflush(stdout); #else #define DEBUG_PRINT #endif #define SIZE 10240 __global__ void MyKernel(int *a, int *b, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size) b[id] = (a[id] >> 1) + a[id]; } int foo(int *a, int *b, int size) { int i; int BlockSize = 256; int BlockNum = (size + BlockSize - 1) / BlockSize; int sum = 0; for(i = 0; i < size; i++) { a[i] += rand() % 50; sum += a[i]; } hipLaunchKernelGGL(( MyKernel), dim3(BlockNum), dim3(BlockSize), 0, 0, a, b, size); hipDeviceSynchronize(); DEBUG_PRINT return sum; } int main() { int i; int size = SIZE; int *d_a, *d_b; int sum_a, sum_b; sum_a = sum_b = 0; DEBUG_PRINT hipMallocManaged((void **)&d_a, size*sizeof(int)); hipMallocManaged((void **)&d_b, size*sizeof(int)); DEBUG_PRINT for(i = 0; i < size; i++) { d_a[i] = rand() % 100; } DEBUG_PRINT sum_a = foo(d_a, d_b, size); for(i = 0; i < size; i++) sum_b += d_b[i]; DEBUG_PRINT hipFree(d_a); hipFree(d_b); DEBUG_PRINT printf("sum_a: %d, sum_b: %d\n", sum_a, sum_b); return 0; }
473007e9f085a2b76f245f4027330975aa98d166.cu
#include <stdio.h> #include <stdlib.h> #define MYDEBUG #ifdef MYDEBUG #define DEBUG_PRINT printf("here: %d\n", __LINE__); fflush(stdout); #else #define DEBUG_PRINT #endif #define SIZE 10240 __global__ void MyKernel(int *a, int *b, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size) b[id] = (a[id] >> 1) + a[id]; } int foo(int *a, int *b, int size) { int i; int BlockSize = 256; int BlockNum = (size + BlockSize - 1) / BlockSize; int sum = 0; for(i = 0; i < size; i++) { a[i] += rand() % 50; sum += a[i]; } MyKernel<<<BlockNum, BlockSize>>>(a, b, size); cudaDeviceSynchronize(); DEBUG_PRINT return sum; } int main() { int i; int size = SIZE; int *d_a, *d_b; int sum_a, sum_b; sum_a = sum_b = 0; DEBUG_PRINT cudaMallocManaged((void **)&d_a, size*sizeof(int)); cudaMallocManaged((void **)&d_b, size*sizeof(int)); DEBUG_PRINT for(i = 0; i < size; i++) { d_a[i] = rand() % 100; } DEBUG_PRINT sum_a = foo(d_a, d_b, size); for(i = 0; i < size; i++) sum_b += d_b[i]; DEBUG_PRINT cudaFree(d_a); cudaFree(d_b); DEBUG_PRINT printf("sum_a: %d, sum_b: %d\n", sum_a, sum_b); return 0; }
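Both versions of the unified-memory example above allocate with cudaMallocManaged/hipMallocManaged and synchronize before reading results on the host, but they never check return codes. The sketch below shows the same pattern with explicit error checking; the CHECK macro and the HalfPlus kernel name are illustrative assumptions, not identifiers from the original files.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                      \
  do {                                                                   \
    cudaError_t err_ = (call);                                           \
    if (err_ != cudaSuccess) {                                           \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,                 \
              cudaGetErrorString(err_));                                 \
      exit(EXIT_FAILURE);                                                \
    }                                                                    \
  } while (0)

// Same element-wise update as MyKernel above: b[id] = (a[id] >> 1) + a[id].
__global__ void HalfPlus(const int *a, int *b, int n) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < n) b[id] = (a[id] >> 1) + a[id];
}

int main() {
  const int n = 10240;
  int *a = nullptr, *b = nullptr;
  CHECK(cudaMallocManaged((void **)&a, n * sizeof(int)));  // managed memory, visible to host and device
  CHECK(cudaMallocManaged((void **)&b, n * sizeof(int)));
  for (int i = 0; i < n; ++i) a[i] = i % 100;

  HalfPlus<<<(n + 255) / 256, 256>>>(a, b, n);
  CHECK(cudaGetLastError());        // launch-configuration errors
  CHECK(cudaDeviceSynchronize());   // wait for the kernel before reading b on the host

  long long sum = 0;
  for (int i = 0; i < n; ++i) sum += b[i];
  printf("sum_b: %lld\n", sum);

  CHECK(cudaFree(a));
  CHECK(cudaFree(b));
  return 0;
}

Checking cudaGetLastError immediately after the launch catches configuration errors, while cudaDeviceSynchronize surfaces asynchronous execution errors and also serves as the barrier the host-side reads of managed memory require.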
238cb4c9176b4015488d0e00b0742340ecd5ee42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "AddVector.h" #include <iostream> #include "Device.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Constructeur *| \*-------------------------------------*/ AddVector::AddVector(const Grid& grid, float* ptrV1, float* ptrV2, float* ptrW, int n) : ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n) { this->sizeOctet = n * sizeof(float); // octet // MM { // MM (malloc Device) { // Allocation de la mmoire Device::malloc(&ptrDevV1, sizeOctet); Device::malloc(&ptrDevV2, sizeOctet); Device::malloc(&ptrDevW, sizeOctet); // Suppression de la zone mmoire pointe Device::memclear(ptrDevV1, sizeOctet); Device::memclear(ptrDevV2, sizeOctet); Device::memclear(ptrDevW, sizeOctet); } // MM (copy Host->Device) { // Ce qui est droite va vers ce qui est gauche donc // Ce qui vient du Host va vers le device Device::memcpyHToD(ptrDevV1, ptrV1, sizeOctet); Device::memcpyHToD(ptrDevV2, ptrV2, sizeOctet); } Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, facultatif } // Grid { this->dg = grid.dg; this->db = grid.db; } } AddVector::~AddVector(void) { //MM (device free) { Device::free(ptrDevV1); Device::free(ptrDevV2); Device::free(ptrDevW); Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, facultatif } } /*--------------------------------------*\ |* Methode *| \*-------------------------------------*/ void AddVector::run() { Device::lastCudaError("addVecteur (before)"); // temp debug hipLaunchKernelGGL(( addVector), dim3(dg),dim3(db), 0, 0, ptrDevV1, ptrDevV2, ptrDevW, n); // assynchrone Device::lastCudaError("addVecteur (after)"); // temp debug Device::synchronize(); // Temp,debug, only for printf in GPU // MM (Device -> Host) { Device::memcpyDToH(ptrW, ptrDevW, sizeOctet); // barriere synchronisation implicite } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
238cb4c9176b4015488d0e00b0742340ecd5ee42.cu
#include "AddVector.h" #include <iostream> #include "Device.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Constructeur *| \*-------------------------------------*/ AddVector::AddVector(const Grid& grid, float* ptrV1, float* ptrV2, float* ptrW, int n) : ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n) { this->sizeOctet = n * sizeof(float); // octet // MM { // MM (malloc Device) { // Allocation de la mémoire Device::malloc(&ptrDevV1, sizeOctet); Device::malloc(&ptrDevV2, sizeOctet); Device::malloc(&ptrDevW, sizeOctet); // Suppression de la zone mémoire pointée Device::memclear(ptrDevV1, sizeOctet); Device::memclear(ptrDevV2, sizeOctet); Device::memclear(ptrDevW, sizeOctet); } // MM (copy Host->Device) { // Ce qui est à droite va vers ce qui est à gauche donc // Ce qui vient du Host va vers le device Device::memcpyHToD(ptrDevV1, ptrV1, sizeOctet); Device::memcpyHToD(ptrDevV2, ptrV2, sizeOctet); } Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, facultatif } // Grid { this->dg = grid.dg; this->db = grid.db; } } AddVector::~AddVector(void) { //MM (device free) { Device::free(ptrDevV1); Device::free(ptrDevV2); Device::free(ptrDevW); Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, facultatif } } /*--------------------------------------*\ |* Methode *| \*-------------------------------------*/ void AddVector::run() { Device::lastCudaError("addVecteur (before)"); // temp debug addVector<<<dg,db>>>(ptrDevV1, ptrDevV2, ptrDevW, n); // assynchrone Device::lastCudaError("addVecteur (after)"); // temp debug Device::synchronize(); // Temp,debug, only for printf in GPU // MM (Device -> Host) { Device::memcpyDToH(ptrW, ptrDevW, sizeOctet); // barriere synchronisation implicite } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
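The AddVector wrapper above only declares addVector as an extern __global__ kernel; its definition lives in another translation unit that is not included here. The sketch below shows what a typical grid-stride definition could look like, under the assumption that the project's actual kernel may differ.

// Assumed definition, for illustration only; not the project's real kernel.
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) {
  const int nbThreadTotal = gridDim.x * blockDim.x;     // threads launched by <<<dg, db>>>
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // Grid-stride loop: every element is covered even when n exceeds the grid size.
  for (; i < n; i += nbThreadTotal) {
    ptrDevW[i] = ptrDevV1[i] + ptrDevV2[i];
  }
}

With this shape, the launch in run() stays correct for any Grid configuration, because leftover elements are picked up by the stride loop rather than requiring dg*db to cover n exactly.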
fd6bcabd1962410ba81c5ebae9ad8d679822ea06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_LOG_SOFTMAX_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/activations/log_softmax.hpp" #ifdef LBANN_HAS_DNN_LIB #include "lbann/utils/dnn_lib/softmax.hpp" #endif // LBANN_HAS_DNN_LIB namespace lbann { namespace { /** @brief Max functor */ template <class T> struct max_op { __device__ __forceinline__ DataType operator()(const T& x1, const T& x2) const { return gpu_lib::max(x1, x2); } }; /** @brief Kernel for max reduction on matrix columns * * Each CUDA block computes the max over a subset of matrix entries * and outputs the result. This is repeated multiple times for * column-wise max reduction. * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param values (height x width) matrix * @param max_values (nblocksx x width) matrix */ template <size_t bsize, typename TensorDataType> __global__ void reduce_max_kernel(size_t height, size_t width, const TensorDataType* __restrict__ values, size_t values_ldim, TensorDataType* __restrict__ max_values) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { // Find largest value for each thread TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& val = values[row + col * values_ldim]; thread_max_val = gpu_lib::max(thread_max_val, val); } // Find largest value for each block const TensorDataType block_max_val = gpu_lib::block_reduce<bsize, 1, 1, DataType, max_op<DataType>>( thread_max_val); if (tid == 0) { max_values[bidx + col * nblocksx] = block_max_val; } } } /** @brief Kernel for matrix column sums * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param sums On input, array of zeros. On output, sum(x) for each * column. 
*/ template <size_t bsize, typename TensorDataType> __global__ void reduce_sum_kernel(size_t height, size_t width, const TensorDataType* __restrict__ values, size_t values_ldim, TensorDataType* __restrict__ sums) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { // Compute sum for each thread TensorDataType thread_sum{0}; for (size_t row = gidx; row < height; row += nthreadsx) { thread_sum += values[row + col * values_ldim]; } // Compute sum for each block const TensorDataType block_sum = gpu_lib::block_reduce<bsize, 1, 1>(thread_sum); if (tid == 0) { gpu_lib::atomic_add(&sums[col], block_sum); } } } /** @brief Compute sum(exp(x-shift)) for each matrix column * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param shifts max(x) for each column * @param sums On input, array of zeros. On output, * sum(exp(x-shift)) for each column. */ template <size_t bsize, typename TensorDataType> __global__ void fp_sumexp_kernel(size_t height, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ shifts, TensorDataType* __restrict__ sums) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { const auto& shift = shifts[col]; // Exponentiate inputs and compute sum for each thread TensorDataType thread_sum{0}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& x = input[row + col * input_ldim]; thread_sum += gpu_lib::exp(x - shift); } // Compute sum for each block const TensorDataType block_sum = gpu_lib::block_reduce<bsize, 1, 1>(thread_sum); if (tid == 0) { gpu_lib::atomic_add(&sums[col], block_sum); } } } /** @brief Compute layer output * * y = x - shift - log(sum(x-shift)) * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param shifts max(x) for each column * @param sums sum(exp(x-shift)) for each column */ template <typename TensorDataType> __global__ void fp_output_kernel(size_t height, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ shifts, const TensorDataType* __restrict__ sums) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t col = gidy; col < width; col += nthreadsy) { const auto& shift = shifts[col]; const TensorDataType log_sum_exp = gpu_lib::log(sums[col]); for (size_t row = gidx; row < height; row += nthreadsx) { const auto& x = input[row + col * input_ldim]; auto& y = output[row + col * output_ldim]; y = x - shift - log_sum_exp; } } } /** @brief Compute gradient w.r.t. input * * dx = dy - softmax(x) * sum(dy) * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param sums Column sums of the gradient w.r.t. 
output */ template <typename TensorDataType> __global__ void bp_kernel(size_t height, size_t width, const TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, const TensorDataType* __restrict__ sums, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t col = gidy; col < width; col += nthreadsy) { const auto& sum = sums[col]; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& y = output[row + col * output_ldim]; const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim]; dx = dy - gpu_lib::exp(y) * sum; } } } } // namespace template <typename TensorDataType> void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) { const TensorDataType zero = 0; const TensorDataType one = 1; const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>( l.get_local_prev_activations()); auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>( l.get_local_activations()); dnn_lib::softmax_forward(one, l.m_tensors_dnn_desc.get_prev_activations(), local_input, zero, l.m_tensors_dnn_desc.get_activations(), local_output, softmax_mode::INSTANCE, softmax_alg::LOG); } template <typename TensorDataType> void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; const TensorDataType zero = 0; const TensorDataType one = 1; const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals()); dnn_lib::softmax_backward(one, l.m_tensors_dnn_desc.get_activations(), local_output, l.m_tensors_dnn_desc.get_prev_error_signals(), local_gradient_wrt_output, zero, l.m_tensors_dnn_desc.get_error_signals(), local_gradient_wrt_input, softmax_mode::INSTANCE, softmax_alg::LOG); } template <typename TensorDataType> void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Setup workspace l.m_workspace->Empty(false); l.m_workspace->AlignWith(l.get_activations()); l.m_workspace->Resize(1, l.get_activations().Width()); // Local matrices const auto& local_input = dynamic_cast<const GPUMatType&>(l.get_local_prev_activations()); auto& local_output = dynamic_cast<GPUMatType&>(l.get_local_activations()); auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix()); const auto& local_height = local_input.Height(); const auto& local_width = local_input.Width(); // GPU objects auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input), gpu::get_sync_info(local_output), gpu::get_sync_info(local_workspace)); // The comm templates will not convert the multisync, so cast the multisync // and use sync_info for comms. 
El::SyncInfo<El::Device::GPU> const& sync_info = multisync; // Find max value in each column gpu_lib::thrust::vector<TensorDataType> max_vals; if (local_input.IsEmpty()) { max_vals.resize(local_width, -std::numeric_limits<DataType>::infinity()); } else { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; max_vals.resize(grid_dims.x * local_width); // Launch GPU Kernel hydrogen::gpu::LaunchKernel(reduce_max_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), max_vals.data().get()); while (grid_dims.x > 1) { const size_t prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals)); max_vals.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel(reduce_max_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), prev_height, max_vals.data().get()); } } El::mpi::AllReduce(max_vals.data().get(), max_vals.size(), El::mpi::MAX, l.m_workspace->RedundantComm(), sync_info); // Compute sum(exp(x-max_val)) for each column El::Zero(*l.m_workspace); if (!local_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(fp_sumexp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), max_vals.data().get(), local_workspace.Buffer()); } l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm()); // Compute output // Note: y = x - max_val - log(sum(exp(x-max_val))) if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(fp_output_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), max_vals.data().get(), local_workspace.LockedBuffer()); } } template <typename TensorDataType> void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Local matrices const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals()); auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix()); const auto& local_height = local_output.Height(); const auto& local_width = local_output.Width(); // GPU objects auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_workspace)); // Compute sum of entries in gradient w.r.t. 
output El::Zero(local_workspace); if (!local_gradient_wrt_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(reduce_sum_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_workspace.Buffer()); } l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm()); // Compute gradient w.r.t. input if (!local_gradient_wrt_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(bp_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_output.LockedBuffer(), local_output.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_workspace.LockedBuffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } template <typename TensorDataType, data_layout Layout, El::Device Device> void log_softmax_layer<TensorDataType, Layout, Device>::fp_compute() { fp_compute_impl(*this); } template <typename TensorDataType, data_layout Layout, El::Device Device> void log_softmax_layer<TensorDataType, Layout, Device>::bp_compute() { bp_compute_impl(*this); } // Template instantiation #define PROTO(T) \ template class log_softmax_layer<T, \ data_layout::DATA_PARALLEL, \ El::Device::GPU>; \ template class log_softmax_layer<T, \ data_layout::MODEL_PARALLEL, \ El::Device::GPU>; #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
fd6bcabd1962410ba81c5ebae9ad8d679822ea06.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_LOG_SOFTMAX_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/activations/log_softmax.hpp" #ifdef LBANN_HAS_DNN_LIB #include "lbann/utils/dnn_lib/softmax.hpp" #endif // LBANN_HAS_DNN_LIB namespace lbann { namespace { /** @brief Max functor */ template <class T> struct max_op { __device__ __forceinline__ DataType operator()(const T& x1, const T& x2) const { return gpu_lib::max(x1, x2); } }; /** @brief Kernel for max reduction on matrix columns * * Each CUDA block computes the max over a subset of matrix entries * and outputs the result. This is repeated multiple times for * column-wise max reduction. * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param values (height x width) matrix * @param max_values (nblocksx x width) matrix */ template <size_t bsize, typename TensorDataType> __global__ void reduce_max_kernel(size_t height, size_t width, const TensorDataType* __restrict__ values, size_t values_ldim, TensorDataType* __restrict__ max_values) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { // Find largest value for each thread TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& val = values[row + col * values_ldim]; thread_max_val = gpu_lib::max(thread_max_val, val); } // Find largest value for each block const TensorDataType block_max_val = gpu_lib::block_reduce<bsize, 1, 1, DataType, max_op<DataType>>( thread_max_val); if (tid == 0) { max_values[bidx + col * nblocksx] = block_max_val; } } } /** @brief Kernel for matrix column sums * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param sums On input, array of zeros. On output, sum(x) for each * column. 
*/ template <size_t bsize, typename TensorDataType> __global__ void reduce_sum_kernel(size_t height, size_t width, const TensorDataType* __restrict__ values, size_t values_ldim, TensorDataType* __restrict__ sums) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { // Compute sum for each thread TensorDataType thread_sum{0}; for (size_t row = gidx; row < height; row += nthreadsx) { thread_sum += values[row + col * values_ldim]; } // Compute sum for each block const TensorDataType block_sum = gpu_lib::block_reduce<bsize, 1, 1>(thread_sum); if (tid == 0) { gpu_lib::atomic_add(&sums[col], block_sum); } } } /** @brief Compute sum(exp(x-shift)) for each matrix column * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param shifts max(x) for each column * @param sums On input, array of zeros. On output, * sum(exp(x-shift)) for each column. */ template <size_t bsize, typename TensorDataType> __global__ void fp_sumexp_kernel(size_t height, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ shifts, TensorDataType* __restrict__ sums) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { const auto& shift = shifts[col]; // Exponentiate inputs and compute sum for each thread TensorDataType thread_sum{0}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& x = input[row + col * input_ldim]; thread_sum += gpu_lib::exp(x - shift); } // Compute sum for each block const TensorDataType block_sum = gpu_lib::block_reduce<bsize, 1, 1>(thread_sum); if (tid == 0) { gpu_lib::atomic_add(&sums[col], block_sum); } } } /** @brief Compute layer output * * y = x - shift - log(sum(x-shift)) * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param shifts max(x) for each column * @param sums sum(exp(x-shift)) for each column */ template <typename TensorDataType> __global__ void fp_output_kernel(size_t height, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ shifts, const TensorDataType* __restrict__ sums) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t col = gidy; col < width; col += nthreadsy) { const auto& shift = shifts[col]; const TensorDataType log_sum_exp = gpu_lib::log(sums[col]); for (size_t row = gidx; row < height; row += nthreadsx) { const auto& x = input[row + col * input_ldim]; auto& y = output[row + col * output_ldim]; y = x - shift - log_sum_exp; } } } /** @brief Compute gradient w.r.t. input * * dx = dy - softmax(x) * sum(dy) * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param sums Column sums of the gradient w.r.t. 
output */ template <typename TensorDataType> __global__ void bp_kernel(size_t height, size_t width, const TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, const TensorDataType* __restrict__ sums, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t col = gidy; col < width; col += nthreadsy) { const auto& sum = sums[col]; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& y = output[row + col * output_ldim]; const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim]; dx = dy - gpu_lib::exp(y) * sum; } } } } // namespace template <typename TensorDataType> void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) { const TensorDataType zero = 0; const TensorDataType one = 1; const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>( l.get_local_prev_activations()); auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>( l.get_local_activations()); dnn_lib::softmax_forward(one, l.m_tensors_dnn_desc.get_prev_activations(), local_input, zero, l.m_tensors_dnn_desc.get_activations(), local_output, softmax_mode::INSTANCE, softmax_alg::LOG); } template <typename TensorDataType> void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; const TensorDataType zero = 0; const TensorDataType one = 1; const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals()); dnn_lib::softmax_backward(one, l.m_tensors_dnn_desc.get_activations(), local_output, l.m_tensors_dnn_desc.get_prev_error_signals(), local_gradient_wrt_output, zero, l.m_tensors_dnn_desc.get_error_signals(), local_gradient_wrt_input, softmax_mode::INSTANCE, softmax_alg::LOG); } template <typename TensorDataType> void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Setup workspace l.m_workspace->Empty(false); l.m_workspace->AlignWith(l.get_activations()); l.m_workspace->Resize(1, l.get_activations().Width()); // Local matrices const auto& local_input = dynamic_cast<const GPUMatType&>(l.get_local_prev_activations()); auto& local_output = dynamic_cast<GPUMatType&>(l.get_local_activations()); auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix()); const auto& local_height = local_input.Height(); const auto& local_width = local_input.Width(); // GPU objects auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input), gpu::get_sync_info(local_output), gpu::get_sync_info(local_workspace)); // The comm templates will not convert the multisync, so cast the multisync // and use sync_info for comms. 
El::SyncInfo<El::Device::GPU> const& sync_info = multisync; // Find max value in each column gpu_lib::thrust::vector<TensorDataType> max_vals; if (local_input.IsEmpty()) { max_vals.resize(local_width, -std::numeric_limits<DataType>::infinity()); } else { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; max_vals.resize(grid_dims.x * local_width); // Launch GPU Kernel hydrogen::gpu::LaunchKernel(reduce_max_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), max_vals.data().get()); while (grid_dims.x > 1) { const size_t prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals)); max_vals.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel(reduce_max_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), prev_height, max_vals.data().get()); } } El::mpi::AllReduce(max_vals.data().get(), max_vals.size(), El::mpi::MAX, l.m_workspace->RedundantComm(), sync_info); // Compute sum(exp(x-max_val)) for each column El::Zero(*l.m_workspace); if (!local_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(fp_sumexp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), max_vals.data().get(), local_workspace.Buffer()); } l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm()); // Compute output // Note: y = x - max_val - log(sum(exp(x-max_val))) if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(fp_output_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), max_vals.data().get(), local_workspace.LockedBuffer()); } } template <typename TensorDataType> void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Local matrices const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals()); auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix()); const auto& local_height = local_output.Height(); const auto& local_width = local_output.Width(); // GPU objects auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_workspace)); // Compute sum of entries in gradient w.r.t. 
output El::Zero(local_workspace); if (!local_gradient_wrt_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(reduce_sum_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_workspace.Buffer()); } l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm()); // Compute gradient w.r.t. input if (!local_gradient_wrt_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(bp_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_output.LockedBuffer(), local_output.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_workspace.LockedBuffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } template <typename TensorDataType, data_layout Layout, El::Device Device> void log_softmax_layer<TensorDataType, Layout, Device>::fp_compute() { fp_compute_impl(*this); } template <typename TensorDataType, data_layout Layout, El::Device Device> void log_softmax_layer<TensorDataType, Layout, Device>::bp_compute() { bp_compute_impl(*this); } // Template instantiation #define PROTO(T) \ template class log_softmax_layer<T, \ data_layout::DATA_PARALLEL, \ El::Device::GPU>; \ template class log_softmax_layer<T, \ data_layout::MODEL_PARALLEL, \ El::Device::GPU>; #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
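The model-parallel path above computes, per column, shift = max(x), then sum(exp(x - shift)), then y = x - shift - log(sum), and the backward kernel applies dx = dy - exp(y) * sum(dy). The host-side reference below is a small sketch of those two formulas for a single column, useful for cross-checking the kernels; it is not part of LBANN.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Forward reference: y = x - max(x) - log(sum(exp(x - max(x)))) for one column.
std::vector<double> log_softmax_ref(const std::vector<double>& x) {
  if (x.empty()) return {};
  const double shift = *std::max_element(x.begin(), x.end());
  double sum = 0.0;
  for (double v : x) sum += std::exp(v - shift);
  const double log_sum = std::log(sum);
  std::vector<double> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) y[i] = x[i] - shift - log_sum;
  return y;
}

// Backward reference matching bp_kernel: dx = dy - exp(y) * sum(dy).
std::vector<double> log_softmax_grad_ref(const std::vector<double>& y,
                                         const std::vector<double>& dy) {
  double sum_dy = 0.0;
  for (double v : dy) sum_dy += v;
  std::vector<double> dx(y.size());
  for (std::size_t i = 0; i < y.size(); ++i)
    dx[i] = dy[i] - std::exp(y[i]) * sum_dy;
  return dx;
}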
ddb40065e9f403efc6a84119e660174ba561376e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // 2018.11.10 // very slow average 200ms // every thread check for a Queen. not a pair[] // 2018.12.29 //copy form CheckOne_Datafile_compare/gpu_1_Queen #include "Kernel.h" //#define DEBUG #define BLOCK_SIZE 512 //----------------------------Kernel---------------------------------------- __global__ void Ker_Warm(){ // empty body, just warmup GPU; if(threadIdx.x == 0 ) printf("GPU is OK!\n"); } __global__ void Ker_Check_Combination ( int *d_combination, // int combination_size, // length of combinations =queen number =N unsigned int *d_result // return conflicts count. ) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No. //printf("block =%d, thread=%d, tid=%d\n",blockIdx.x, threadIdx.x ,tid); if(tid >= combination_size) return; if(tid==0) d_result[0]=0; int curX=tid; int curY=d_combination[tid]; //check every queen after cur; for(int iX=tid+1 ; iX <= combination_size-1 ; iX++){ int iY=d_combination[iX]; if(iY == curY || iX+iY == curX+ curY || iY -iX == curY - curX) { // not a Permutations, it is random numbers. //printf("-------->>> (%5d,%5d) (%5d,%5d) thread:%5d \n ", curX,curY,iX,iY,tid); atomicAdd ((unsigned int *)&d_result[0],1); // break; // get all conflicts } } }// end of Kernel //----------------------------CPU Interface---------------------------------------- void setDevice(int i) { checkCudaErrors( hipSetDevice( i ) ); } int getDevice() { int id=-1; checkCudaErrors( hipGetDevice( &id ) ); return id; } void warmGPU() { hipError_t cuda_err; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float cuda_time=0; hipEventRecord(start, 0); hipLaunchKernelGGL(( Ker_Warm) , dim3(1),dim3(1), 0, 0, ); cuda_err= hipSuccess; cuda_err = hipGetLastError(); if (cuda_err != hipSuccess) { fprintf(stderr, "Failed to launch (error code= %s)!\n", hipGetErrorString(cuda_err)); exit(EXIT_FAILURE); } else { #ifdef DEBUG fprintf(stderr, "launch successed! ( code= %s)!\n", hipGetErrorString(cuda_err)); #endif } //checkCudaErrors( hipDeviceSynchronize() ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&cuda_time, start, stop); #ifdef DEBUG printf("%-40s %f ms \n","warmup() run time=",cuda_time); #endif } unsigned int get_conflicts(int * combination, int combination_size) { // GPU //warmup<<<1, 1>>>(); // Create input data int *h_combination = 0; //store a number in [1~N] int *d_combination = 0; unsigned int * h_result = 0; unsigned int * d_result = 0; // timer //std::chrono::time_point<std::chrono::system_clock> c11_start, c11_end; //hipEvent_t start, stop; //hipEventCreate(&start); //hipEventCreate(&stop); //float cuda_time=0; //int cpu_time=0; //cuda status var; hipError_t cuda_err; // Allocate CPU memory and initialize data. // init h_combination & timer. //c11_start =std::chrono::system_clock::now(); //h_combination =(int *)malloc(combination_size * sizeof(int)); // need not allocate & free memory h_result =(unsigned int *)malloc( 1 * sizeof(unsigned int)); if(h_result==NULL ) { printf("malloc h_result error \ni"); exit(1); } h_combination= combination; cuda_err = hipSuccess; //hipEventRecord(start, 0); // allocate GPU mem checkCudaErrors(hipMalloc((void **)&d_combination, combination_size * sizeof(int))); checkCudaErrors(hipMalloc((void **)&d_result , 1 * sizeof(int))); cuda_err = hipGetLastError(); if (cuda_err != hipSuccess) { fprintf(stderr, "alloc d_combination error! 
(error code= %s)!\n", hipGetErrorString(cuda_err)); exit(EXIT_FAILURE); } //else // fprintf(stderr, "alloc d_combination successed ! ( code= %s)!\n", hipGetErrorString(cuda_err)); //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&cuda_time, start, stop); //if(DEBUG) printf("%-40s %f ms \n","GPU mem allocate time=",cuda_time); //************************************************************************************************************ //combination H->D //hipEventRecord(start, 0); checkCudaErrors(hipMemcpy(d_combination, h_combination, combination_size * sizeof(int), hipMemcpyHostToDevice)); //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&cuda_time, start, stop); //if(DEBUG) printf("%-40s %f ms \n","combination[] tranfer time =", cuda_time); // Execute & timer //hipEventRecord(start, 0); hipLaunchKernelGGL(( Ker_Check_Combination), dim3(( combination_size + BLOCK_SIZE-1)/BLOCK_SIZE) , dim3(BLOCK_SIZE) , 0, 0, d_combination ,combination_size , d_result); cuda_err= hipSuccess; cuda_err = hipGetLastError(); if (cuda_err != hipSuccess) { #ifdef DEBUG fprintf(stderr, "Failed to launch (error code= %s)!\n", hipGetErrorString(cuda_err)); #endif exit(EXIT_FAILURE); } #ifdef DEBUG else fprintf(stderr, "launch successed! ( code= %s)!\n", hipGetErrorString(cuda_err)); #endif //checkCudaErrors(hipDeviceSynchronize()); //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&cuda_time, start, stop); //if(DEBUG) printf("%-40s %f ms \n","CUDA Kernel run time=",cuda_time); // D->H and timer h_result[0]=0; //hipEventRecord(start, 0); checkCudaErrors(hipMemcpy(h_result, d_result, 1 * sizeof(unsigned int), hipMemcpyDeviceToHost)); cuda_err = hipSuccess; cuda_err = hipGetLastError(); if (cuda_err != hipSuccess) { #ifdef DEBUG fprintf(stderr, "D->H error! (error code= %s)!\n", hipGetErrorString(cuda_err)); #endif exit(EXIT_FAILURE); } #ifdef DEBUG else fprintf(stderr, "D->H successed ! ( code= %s)!\n", hipGetErrorString(cuda_err)); #endif //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&cuda_time, start, stop); //if(DEBUG) printf("%-40s %f ms \n","CUDA D->H time = ", cuda_time); //printf("testORI Kernel OK! result= %d \n",h_result[0]); unsigned int conflicts = h_result[0]; //************************************************************************************************************ //free memory //hipEventDestroy(start); //hipEventDestroy(stop); // free(h_combination); //it is a pointer point to static array a[][], need not to free; free(h_result); checkCudaErrors( hipFree(d_combination) ); checkCudaErrors( hipFree(d_result) ); checkCudaErrors( hipDeviceSynchronize() ); //checkCudaErrors( hipDeviceReset() ); return conflicts; }
ddb40065e9f403efc6a84119e660174ba561376e.cu
// 2018.11.10 // very slow average 200ms // every thread check for a Queen. not a pair[] // 2018.12.29 //copy form CheckOne_Datafile_compare/gpu_1_Queen #include "Kernel.h" //#define DEBUG #define BLOCK_SIZE 512 //----------------------------Kernel---------------------------------------- __global__ void Ker_Warm(){ // empty body, just warmup GPU; if(threadIdx.x == 0 ) printf("GPU is OK!\n"); } __global__ void Ker_Check_Combination ( int *d_combination, // int combination_size, // length of combinations =queen number =N unsigned int *d_result // return conflicts count. ) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No. //printf("block =%d, thread=%d, tid=%d\n",blockIdx.x, threadIdx.x ,tid); if(tid >= combination_size) return; if(tid==0) d_result[0]=0; int curX=tid; int curY=d_combination[tid]; //check every queen after cur; for(int iX=tid+1 ; iX <= combination_size-1 ; iX++){ int iY=d_combination[iX]; if(iY == curY || iX+iY == curX+ curY || iY -iX == curY - curX) { // not a Permutations, it is random numbers. //printf("-------->>> (%5d,%5d) (%5d,%5d) thread:%5d \n ", curX,curY,iX,iY,tid); atomicAdd ((unsigned int *)&d_result[0],1); // break; // get all conflicts } } }// end of Kernel //----------------------------CPU Interface---------------------------------------- void setDevice(int i) { checkCudaErrors( cudaSetDevice( i ) ); } int getDevice() { int id=-1; checkCudaErrors( cudaGetDevice( &id ) ); return id; } void warmGPU() { cudaError_t cuda_err; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float cuda_time=0; cudaEventRecord(start, 0); Ker_Warm <<<1,1>>> (); cuda_err= cudaSuccess; cuda_err = cudaGetLastError(); if (cuda_err != cudaSuccess) { fprintf(stderr, "Failed to launch (error code= %s)!\n", cudaGetErrorString(cuda_err)); exit(EXIT_FAILURE); } else { #ifdef DEBUG fprintf(stderr, "launch successed! ( code= %s)!\n", cudaGetErrorString(cuda_err)); #endif } //checkCudaErrors( cudaDeviceSynchronize() ); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cuda_time, start, stop); #ifdef DEBUG printf("%-40s %f ms \n","warmup() run time=",cuda_time); #endif } unsigned int get_conflicts(int * combination, int combination_size) { // 预热GPU //warmup<<<1, 1>>>(); // Create input data int *h_combination = 0; //store a number in [1~N] int *d_combination = 0; unsigned int * h_result = 0; unsigned int * d_result = 0; // timer //std::chrono::time_point<std::chrono::system_clock> c11_start, c11_end; //cudaEvent_t start, stop; //cudaEventCreate(&start); //cudaEventCreate(&stop); //float cuda_time=0; //int cpu_time=0; //cuda status var; cudaError_t cuda_err; // Allocate CPU memory and initialize data. // init h_combination & timer. //c11_start =std::chrono::system_clock::now(); //h_combination =(int *)malloc(combination_size * sizeof(int)); // need not allocate & free memory h_result =(unsigned int *)malloc( 1 * sizeof(unsigned int)); if(h_result==NULL ) { printf("malloc h_result error \ni"); exit(1); } h_combination= combination; cuda_err = cudaSuccess; //cudaEventRecord(start, 0); // allocate GPU mem checkCudaErrors(cudaMalloc((void **)&d_combination, combination_size * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&d_result , 1 * sizeof(int))); cuda_err = cudaGetLastError(); if (cuda_err != cudaSuccess) { fprintf(stderr, "alloc d_combination error! 
(error code= %s)!\n", cudaGetErrorString(cuda_err)); exit(EXIT_FAILURE); } //else // fprintf(stderr, "alloc d_combination successed ! ( code= %s)!\n", cudaGetErrorString(cuda_err)); //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&cuda_time, start, stop); //if(DEBUG) printf("%-40s %f ms \n","GPU mem allocate time=",cuda_time); //************************************************************************************************************ //combination H->D //cudaEventRecord(start, 0); checkCudaErrors(cudaMemcpy(d_combination, h_combination, combination_size * sizeof(int), cudaMemcpyHostToDevice)); //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&cuda_time, start, stop); //if(DEBUG) printf("%-40s %f ms \n","combination[] tranfer time =", cuda_time); // Execute & timer //cudaEventRecord(start, 0); Ker_Check_Combination<<< ( combination_size + BLOCK_SIZE-1)/BLOCK_SIZE , BLOCK_SIZE >>> (d_combination ,combination_size , d_result); cuda_err= cudaSuccess; cuda_err = cudaGetLastError(); if (cuda_err != cudaSuccess) { #ifdef DEBUG fprintf(stderr, "Failed to launch (error code= %s)!\n", cudaGetErrorString(cuda_err)); #endif exit(EXIT_FAILURE); } #ifdef DEBUG else fprintf(stderr, "launch successed! ( code= %s)!\n", cudaGetErrorString(cuda_err)); #endif //checkCudaErrors(cudaDeviceSynchronize()); //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&cuda_time, start, stop); //if(DEBUG) printf("%-40s %f ms \n","CUDA Kernel run time=",cuda_time); // D->H and timer h_result[0]=0; //cudaEventRecord(start, 0); checkCudaErrors(cudaMemcpy(h_result, d_result, 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); cuda_err = cudaSuccess; cuda_err = cudaGetLastError(); if (cuda_err != cudaSuccess) { #ifdef DEBUG fprintf(stderr, "D->H error! (error code= %s)!\n", cudaGetErrorString(cuda_err)); #endif exit(EXIT_FAILURE); } #ifdef DEBUG else fprintf(stderr, "D->H successed ! ( code= %s)!\n", cudaGetErrorString(cuda_err)); #endif //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&cuda_time, start, stop); //if(DEBUG) printf("%-40s %f ms \n","CUDA D->H time = ", cuda_time); //printf("testORI Kernel OK! result= %d \n",h_result[0]); unsigned int conflicts = h_result[0]; //************************************************************************************************************ //free memory //cudaEventDestroy(start); //cudaEventDestroy(stop); // free(h_combination); //it is a pointer point to static array a[][], need not to free; free(h_result); checkCudaErrors( cudaFree(d_combination) ); checkCudaErrors( cudaFree(d_result) ); checkCudaErrors( cudaDeviceSynchronize() ); //checkCudaErrors( cudaDeviceReset() ); return conflicts; }
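In the kernel above, thread tid owns the queen at (tid, combination[tid]) and counts one conflict for every later queen that shares its row or one of its diagonals, accumulating into d_result[0] with atomicAdd. The CPU reference sketch below is not part of the original file, but it should return the same count as get_conflicts and can be used to validate the GPU result.

// Counts conflicting queen pairs for a board where queen i sits at (i, combination[i]).
unsigned int get_conflicts_cpu(const int* combination, int combination_size) {
  unsigned int conflicts = 0;
  for (int curX = 0; curX < combination_size; ++curX) {
    const int curY = combination[curX];
    for (int iX = curX + 1; iX < combination_size; ++iX) {
      const int iY = combination[iX];
      if (iY == curY ||                 // same row
          iX + iY == curX + curY ||     // same anti-diagonal
          iY - iX == curY - curX) {     // same diagonal
        ++conflicts;                    // one increment per conflicting pair, like the atomicAdd
      }
    }
  }
  return conflicts;
}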
a397e0c39f8289ca9474fa000bcf43a23e9e8916.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" // 2-point angular correlation const int BLOCKSIZE = 256; const int ROWSPERTHREAD = 256; // Columns are D and rows are R // All computation in single-precision __global__ void DR_kernel(int nCols, int nRows, float *D, float *R, unsigned long long int *gHist) { // The thread id on the x-axis and y-axis int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * ROWSPERTHREAD; // If the thread is inside the domain if (x < nCols) { // Shared histogram for the thread block __shared__ unsigned int sHist[720]; // Thread number zero will initialize the shared memory if (threadIdx.x == 0) { for (int i = 0; i < 720; i++) { sHist[i] = 0; } } __syncthreads(); // Right ascension and declination in degrees for the current column float asc1 = D[x * 2]; float dec1 = D[x * 2 + 1]; // The amount of rows to be calculated is ROWSPERTHREAD or rows left in the domain, whichever is smaller int nElements = min(nRows-y, ROWSPERTHREAD); for (int j = 0; j < nElements; j++) { // Right ascension and declination degrees for the current row float asc2 = R[y + j * 2]; float dec2 = R[y + j * 2 + 1]; // Compute the intermediate value float tmp = sinf(dec1) * sinf(dec2) + cosf(dec1) * cosf(dec2) * cosf(asc1-asc2); // Clamp it to -1, 1 tmp = fminf(tmp, 1.0f); tmp = fmaxf(tmp, -1.0f); // Compute the angle in radians float radianResult = acosf(tmp); // Convert to degrees float degreeResult = radianResult * 180.0f/3.14159f; // Compute the bin index int resultIndex = floor(degreeResult * 4.0f); // Increment the bin in the shared histogram atomicAdd(&sHist[resultIndex], 1); } __syncthreads(); // Thread number zero will write the shared histogram to global device memory if (threadIdx.x == 0) { for (int i = 0; i < 720; i++) { // Update the global histogram with the shared histogram atomicAdd(&gHist[i], sHist[i]); } } } }
a397e0c39f8289ca9474fa000bcf43a23e9e8916.cu
#include "includes.h" // 2-point angular correlation const int BLOCKSIZE = 256; const int ROWSPERTHREAD = 256; // Columns are D and rows are R // All computation in single-precision __global__ void DR_kernel(int nCols, int nRows, float *D, float *R, unsigned long long int *gHist) { // The thread id on the x-axis and y-axis int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * ROWSPERTHREAD; // If the thread is inside the domain if (x < nCols) { // Shared histogram for the thread block __shared__ unsigned int sHist[720]; // Thread number zero will initialize the shared memory if (threadIdx.x == 0) { for (int i = 0; i < 720; i++) { sHist[i] = 0; } } __syncthreads(); // Right ascension and declination in degrees for the current column float asc1 = D[x * 2]; float dec1 = D[x * 2 + 1]; // The amount of rows to be calculated is ROWSPERTHREAD or rows left in the domain, whichever is smaller int nElements = min(nRows-y, ROWSPERTHREAD); for (int j = 0; j < nElements; j++) { // Right ascension and declination degrees for the current row float asc2 = R[y + j * 2]; float dec2 = R[y + j * 2 + 1]; // Compute the intermediate value float tmp = sinf(dec1) * sinf(dec2) + cosf(dec1) * cosf(dec2) * cosf(asc1-asc2); // Clamp it to -1, 1 tmp = fminf(tmp, 1.0f); tmp = fmaxf(tmp, -1.0f); // Compute the angle in radians float radianResult = acosf(tmp); // Convert to degrees float degreeResult = radianResult * 180.0f/3.14159f; // Compute the bin index int resultIndex = floor(degreeResult * 4.0f); // Increment the bin in the shared histogram atomicAdd(&sHist[resultIndex], 1); } __syncthreads(); // Thread number zero will write the shared histogram to global device memory if (threadIdx.x == 0) { for (int i = 0; i < 720; i++) { // Update the global histogram with the shared histogram atomicAdd(&gHist[i], sHist[i]); } } } }
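DR_kernel above tiles the D-R pair space so that blockIdx.x spans BLOCKSIZE columns of D while blockIdx.y spans ROWSPERTHREAD rows of R, and each block accumulates into a 720-bin shared histogram of 0.25-degree bins before one thread flushes it to the global histogram. The host-side launch sketch below shows the grid shape those constants imply; launch_DR and its argument names are assumptions for illustration, since no host code ships with these files.

// Assumed host-side launcher: grid.x tiles the columns of D in BLOCKSIZE chunks,
// grid.y tiles the rows of R in ROWSPERTHREAD chunks, matching the kernel's indexing.
void launch_DR(int nCols, int nRows, float* d_D, float* d_R,
               unsigned long long int* d_hist) {
  dim3 block(BLOCKSIZE, 1, 1);
  dim3 grid((nCols + BLOCKSIZE - 1) / BLOCKSIZE,
            (nRows + ROWSPERTHREAD - 1) / ROWSPERTHREAD,
            1);
  DR_kernel<<<grid, block>>>(nCols, nRows, d_D, d_R, d_hist);
}

Each block walks its ROWSPERTHREAD rows serially inside the kernel, so the shared histogram is merged into global memory once per block rather than once per pair, which keeps the atomicAdd traffic on gHist low.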
38cccee988e92b0c38dc7fda139762a405488dd7.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2020 IBM Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifdef USE_ROCM #include "cuda_kernel_launcher.h" #include <LightGBM/utils/log.h> #include <hip/hip_runtime.h> #include <cstdio> namespace LightGBM { void cuda_histogram( int histogram_size, data_size_t leaf_num_data, data_size_t num_data, bool use_all_features, bool is_constant_hessian, int num_workgroups, hipStream_t stream, uint8_t* arg0, uint8_t* arg1, data_size_t arg2, data_size_t* arg3, data_size_t arg4, score_t* arg5, score_t* arg6, score_t arg6_const, char* arg7, volatile int* arg8, void* arg9, size_t exp_workgroups_per_feature) { if (histogram_size == 16) { if (leaf_num_data == num_data) { if (use_all_features) { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram16), dim3(num_workgroups), dim3(16), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram16), dim3(num_workgroups), dim3(16), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram16_fulldata), dim3(num_workgroups), dim3(16), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram16_fulldata), dim3(num_workgroups), dim3(16), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } else { if (use_all_features) { // seems all features is always enabled, so this should be the same as fulldata if (!is_constant_hessian) hipLaunchKernelGGL(( histogram16), dim3(num_workgroups), dim3(16), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram16), dim3(num_workgroups), dim3(16), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram16), dim3(num_workgroups), dim3(16), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram16), dim3(num_workgroups), dim3(16), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } } else if (histogram_size == 64) { if (leaf_num_data == num_data) { if (use_all_features) { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram64), dim3(num_workgroups), dim3(64), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram64), dim3(num_workgroups), dim3(64), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram64_fulldata), dim3(num_workgroups), dim3(64), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram64_fulldata), dim3(num_workgroups), dim3(64), 0, stream, arg0, arg1, arg2, arg3, arg4, 
arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } else { if (use_all_features) { // seems all features is always enabled, so this should be the same as fulldata if (!is_constant_hessian) hipLaunchKernelGGL(( histogram64), dim3(num_workgroups), dim3(64), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram64), dim3(num_workgroups), dim3(64), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram64), dim3(num_workgroups), dim3(64), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram64), dim3(num_workgroups), dim3(64), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } } else { if (leaf_num_data == num_data) { if (use_all_features) { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram256), dim3(num_workgroups), dim3(256), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram256), dim3(num_workgroups), dim3(256), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram256_fulldata), dim3(num_workgroups), dim3(256), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram256_fulldata), dim3(num_workgroups), dim3(256), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } else { if (use_all_features) { // seems all features is always enabled, so this should be the same as fulldata if (!is_constant_hessian) hipLaunchKernelGGL(( histogram256), dim3(num_workgroups), dim3(256), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram256), dim3(num_workgroups), dim3(256), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) hipLaunchKernelGGL(( histogram256), dim3(num_workgroups), dim3(256), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else hipLaunchKernelGGL(( histogram256), dim3(num_workgroups), dim3(256), 0, stream, arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } } } } // namespace LightGBM #endif // USE_ROCM
38cccee988e92b0c38dc7fda139762a405488dd7.cu
/*! * Copyright (c) 2020 IBM Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifdef USE_CUDA #include "cuda_kernel_launcher.h" #include <LightGBM/utils/log.h> #include <cuda_runtime.h> #include <cstdio> namespace LightGBM { void cuda_histogram( int histogram_size, data_size_t leaf_num_data, data_size_t num_data, bool use_all_features, bool is_constant_hessian, int num_workgroups, cudaStream_t stream, uint8_t* arg0, uint8_t* arg1, data_size_t arg2, data_size_t* arg3, data_size_t arg4, score_t* arg5, score_t* arg6, score_t arg6_const, char* arg7, volatile int* arg8, void* arg9, size_t exp_workgroups_per_feature) { if (histogram_size == 16) { if (leaf_num_data == num_data) { if (use_all_features) { if (!is_constant_hessian) histogram16<<<num_workgroups, 16, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram16<<<num_workgroups, 16, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) histogram16_fulldata<<<num_workgroups, 16, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram16_fulldata<<<num_workgroups, 16, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } else { if (use_all_features) { // seems all features is always enabled, so this should be the same as fulldata if (!is_constant_hessian) histogram16<<<num_workgroups, 16, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram16<<<num_workgroups, 16, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) histogram16<<<num_workgroups, 16, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram16<<<num_workgroups, 16, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } } else if (histogram_size == 64) { if (leaf_num_data == num_data) { if (use_all_features) { if (!is_constant_hessian) histogram64<<<num_workgroups, 64, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram64<<<num_workgroups, 64, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) histogram64_fulldata<<<num_workgroups, 64, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram64_fulldata<<<num_workgroups, 64, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } else { if (use_all_features) { // seems all features is always enabled, so this should be the same as fulldata if (!is_constant_hessian) histogram64<<<num_workgroups, 64, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram64<<<num_workgroups, 64, 0, 
stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) histogram64<<<num_workgroups, 64, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram64<<<num_workgroups, 64, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } } else { if (leaf_num_data == num_data) { if (use_all_features) { if (!is_constant_hessian) histogram256<<<num_workgroups, 256, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram256<<<num_workgroups, 256, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) histogram256_fulldata<<<num_workgroups, 256, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram256_fulldata<<<num_workgroups, 256, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } else { if (use_all_features) { // seems all features is always enabled, so this should be the same as fulldata if (!is_constant_hessian) histogram256<<<num_workgroups, 256, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram256<<<num_workgroups, 256, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } else { if (!is_constant_hessian) histogram256<<<num_workgroups, 256, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); else histogram256<<<num_workgroups, 256, 0, stream>>>(arg0, arg1, arg2, arg3, arg4, arg5, arg6_const, arg7, arg8, static_cast<acc_type*>(arg9), exp_workgroups_per_feature); } } } } } // namespace LightGBM #endif // USE_CUDA
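// Editor's note: the LightGBM pair above differs only in what hipify rewrites mechanically:
// the compile guard (USE_CUDA -> USE_ROCM), the runtime header and stream type
// (cudaStream_t -> hipStream_t), and the launch syntax (kernel<<<grid, block, shmem, stream>>>(...)
// -> hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), shmem, stream, ...)).  The snippet below
// is a minimal, hypothetical illustration of that mapping; `toy_histogram` is not a LightGBM kernel.
#include <cuda_runtime.h>

__global__ void toy_histogram(const int* bins, int n, int* hist) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicAdd(&hist[bins[i]], 1);   // one atomic increment per input element
}

void launch_toy_histogram(const int* d_bins, int n, int* d_hist, cudaStream_t stream) {
    const int block = 256;
    const int grid  = (n + block - 1) / block;
    // CUDA form, as in the .cu file above:
    toy_histogram<<<grid, block, 0, stream>>>(d_bins, n, d_hist);
    // The hipified form in the .hip file above would read:
    //   hipLaunchKernelGGL(toy_histogram, dim3(grid), dim3(block), 0, stream, d_bins, n, d_hist);
}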
cc6b0cf94fe2723b58567ff2f1da5b7e25ed6da8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/ScatterGatherChecks.h> #include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/KernelUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> namespace at { namespace native { // Implement as functors since lambdas don't get optimized. class ReduceMultiply { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { (void)numel; // suppress unused warning gpuAtomicMul(self_data_start + index, *src_data); } }; static ReduceMultiply reduce_multiply; class ReduceAdd { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { fastAtomicAdd(self_data_start, index, numel, *src_data, true); } }; static ReduceAdd reduce_add; class ReduceMean { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { fastAtomicAdd(self_data_start, index, numel, *src_data, true); } }; static ReduceMean reduce_mean; class ReduceMinimum { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { (void)numel; // suppress unused warning gpuAtomicMin(self_data_start + index, *src_data); } }; static ReduceMinimum reduce_minimum; class ReduceMaximum { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { (void)numel; // suppress unused warning gpuAtomicMax(self_data_start + index, *src_data); } }; static ReduceMaximum reduce_maximum; class TensorAssign { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { (void)numel; // suppress unused warning *(self_data_start + index) = *src_data; } }; static TensorAssign tensor_assign; // The kernels are implemented on an opaque, // self-aligned type of the correct size, // to avoid redundant kernels for different types // of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; // essentialy rewritten related to legacy::launch_kernel parts template <int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < N) { f(idx); idx += nt; } } } template <int nt, int vt, typename func_t> static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } const dim3 block(nt); const dim3 grid((N + block.x * vt - 1) / (block.x * vt)); const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( _scatter_gather_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <bool is_scatter_like, typename scalar_t> struct _cuda_scatter_gather_internal_kernel { template <typename func_t> void operator() ( TensorIterator& iter, int64_t index_size, int64_t index_stride, int64_t numel, // Do not use `const` qualifier here as it may cause issue in cuda 11.6.x. See #75434, #75545 const func_t& f ) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()( sub_iter, index_size, index_stride, numel, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* src_ptr = (char*)iter.data_ptr(1); char* index_ptr = (char*)iter.data_ptr(2); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds"); f( (scalar_t*)(self_ptr + offsets[0]), is_scatter_like ? idx_dim * index_stride : 0, numel, (scalar_t*)(src_ptr + offsets[1]) + (is_scatter_like ? 0 : idx_dim * index_stride) ); }; _launch_scatter_gather_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool is_scatter_like = true, bool cast_to_opaque = true> struct cuda_scatter_gather_base_kernel { void operator()( const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const ReduceAdd& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? 
self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_gather_base_kernel_func", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, self.numel(), f ); } ); } void operator()( const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const TensorAssign& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_gather_base_kernel_func", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, self.numel(), f ); } ); } template <typename func_t> void operator()( const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const func_t& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? 
self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_gather_base_kernel_func", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, self.numel(), f ); } ); } }; // struct cuda_scatter_gather_base_kernel template <typename scalar_t> struct _cuda_scatter_fill_internal_kernel { template <typename func_t> void operator()( TensorIterator& iter, scalar_t src_val, int64_t index_size, int64_t index_stride, int64_t numel, // Do not use `const` qualifier here as it may cause issue in cuda 11.6.x. See #75434, #75545 const func_t& f ) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_fill_internal_kernel<scalar_t>()( sub_iter, src_val, index_size, index_stride, numel, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* index_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds" ); f( (scalar_t*)(self_ptr + offsets[0]), idx_dim * index_stride, numel, (scalar_t*)&src_val ); }; _launch_scatter_gather_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool cast_to_opaque = true> struct cuda_scatter_fill_base_kernel { template <typename func_t> void operator()( const Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const func_t& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(index) .build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_fill_base_kernel_func", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, self.numel(), f ); } ); } void operator()( const Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const ReduceMultiply& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(index) .build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_FLOATING_TYPES_AND2( 
at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_fill_base_kernel_reduce_multiply", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, self.numel(), f ); } ); } }; // struct cuda_scatter_fill_base_kernel void gather_cuda_kernel(const Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) { cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()( result, dim, index, self, "gather_out_cuda", tensor_assign); } void scatter_cuda_kernel(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { cuda_scatter_gather_base_kernel<>()( self, dim, index, src, "scatter_cuda_", tensor_assign); } void scatter_fill_cuda_kernel(const Tensor& self, int64_t dim, const Tensor& index, const Scalar& src) { cuda_scatter_fill_base_kernel<>()( self, dim, index, src, "scatter_fill_cuda_", tensor_assign); } void scatter_add_cuda_kernel(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("scatter_add_cuda_kernel"); cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()( self, dim, index, src, "scatter_add_cuda_", reduce_add); } void scatter_reduce_cuda_kernel(const Tensor& self, const int64_t dim, const Tensor& index, const Tensor& src, const SCATTER_GATHER_OP& reduce) { switch (reduce) { case SCATTER_GATHER_OP::REDUCE_ADD : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_add_", reduce_add); break; case SCATTER_GATHER_OP::REDUCE_MULTIPLY : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_multiply_", reduce_multiply); break; default : break; } } void scatter_reduce_two_cuda_kernel(const Tensor& self, const int64_t dim, const Tensor& index, const Tensor& src, const SCATTER_GATHER_OP& reduce) { globalContext().alertNotDeterministic("scatter_reduce_cuda"); switch (reduce) { case SCATTER_GATHER_OP::REDUCE_ADD : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_sum_", reduce_add); break; case SCATTER_GATHER_OP::REDUCE_MULTIPLY : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_prod_", reduce_multiply); break; case SCATTER_GATHER_OP::REDUCE_MAXIMUM : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_amax_", reduce_maximum); break; case SCATTER_GATHER_OP::REDUCE_MINIMUM : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_amin_", reduce_minimum); break; case SCATTER_GATHER_OP::REDUCE_MEAN : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_mean_", reduce_mean); break; } } void scatter_scalar_reduce_cuda_kernel(const Tensor& self, const int64_t dim, const Tensor& index, const Scalar& value, const SCATTER_GATHER_OP& reduce) { switch (reduce) { case SCATTER_GATHER_OP::REDUCE_ADD : cuda_scatter_fill_base_kernel<false>()(self, dim, index, value, "scatter_fill_cuda_add_", reduce_add); break; case SCATTER_GATHER_OP::REDUCE_MULTIPLY : cuda_scatter_fill_base_kernel<false>()(self, dim, index, value, "scatter_fill_cuda_multiply_", reduce_multiply); break; default : break; } } 
REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel); REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel); REGISTER_DISPATCH(scatter_reduce_stub, &scatter_reduce_cuda_kernel); REGISTER_DISPATCH(scatter_scalar_reduce_stub, &scatter_scalar_reduce_cuda_kernel); REGISTER_DISPATCH(scatter_reduce_two_stub, &scatter_reduce_two_cuda_kernel); }} // namespace at::native
cc6b0cf94fe2723b58567ff2f1da5b7e25ed6da8.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/ScatterGatherChecks.h> #include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/KernelUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> namespace at { namespace native { // Implement as functors since lambdas don't get optimized. class ReduceMultiply { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { (void)numel; // suppress unused warning gpuAtomicMul(self_data_start + index, *src_data); } }; static ReduceMultiply reduce_multiply; class ReduceAdd { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { fastAtomicAdd(self_data_start, index, numel, *src_data, true); } }; static ReduceAdd reduce_add; class ReduceMean { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { fastAtomicAdd(self_data_start, index, numel, *src_data, true); } }; static ReduceMean reduce_mean; class ReduceMinimum { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { (void)numel; // suppress unused warning gpuAtomicMin(self_data_start + index, *src_data); } }; static ReduceMinimum reduce_minimum; class ReduceMaximum { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { (void)numel; // suppress unused warning gpuAtomicMax(self_data_start + index, *src_data); } }; static ReduceMaximum reduce_maximum; class TensorAssign { public: template <typename scalar_t> constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, const scalar_t * src_data) const { (void)numel; // suppress unused warning *(self_data_start + index) = *src_data; } }; static TensorAssign tensor_assign; // The kernels are implemented on an opaque, // self-aligned type of the correct size, // to avoid redundant kernels for different types // of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; // essentialy rewritten related to legacy::launch_kernel parts template <int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < N) { f(idx); idx += nt; } } } template <int nt, int vt, typename func_t> static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } const dim3 block(nt); const dim3 grid((N + block.x * vt - 1) / (block.x * vt)); const auto stream = at::cuda::getCurrentCUDAStream(); _scatter_gather_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <bool is_scatter_like, typename scalar_t> struct _cuda_scatter_gather_internal_kernel { template <typename func_t> void operator() ( TensorIterator& iter, int64_t index_size, int64_t index_stride, int64_t numel, // Do not use `const` qualifier here as it may cause issue in cuda 11.6.x. See #75434, #75545 const func_t& f ) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()( sub_iter, index_size, index_stride, numel, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* src_ptr = (char*)iter.data_ptr(1); char* index_ptr = (char*)iter.data_ptr(2); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds"); f( (scalar_t*)(self_ptr + offsets[0]), is_scatter_like ? idx_dim * index_stride : 0, numel, (scalar_t*)(src_ptr + offsets[1]) + (is_scatter_like ? 0 : idx_dim * index_stride) ); }; _launch_scatter_gather_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool is_scatter_like = true, bool cast_to_opaque = true> struct cuda_scatter_gather_base_kernel { void operator()( const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const ReduceAdd& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? 
self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_gather_base_kernel_func", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, self.numel(), f ); } ); } void operator()( const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const TensorAssign& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_gather_base_kernel_func", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, self.numel(), f ); } ); } template <typename func_t> void operator()( const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const func_t& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? 
self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_gather_base_kernel_func", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, self.numel(), f ); } ); } }; // struct cuda_scatter_gather_base_kernel template <typename scalar_t> struct _cuda_scatter_fill_internal_kernel { template <typename func_t> void operator()( TensorIterator& iter, scalar_t src_val, int64_t index_size, int64_t index_stride, int64_t numel, // Do not use `const` qualifier here as it may cause issue in cuda 11.6.x. See #75434, #75545 const func_t& f ) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_fill_internal_kernel<scalar_t>()( sub_iter, src_val, index_size, index_stride, numel, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* index_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds" ); f( (scalar_t*)(self_ptr + offsets[0]), idx_dim * index_stride, numel, (scalar_t*)&src_val ); }; _launch_scatter_gather_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool cast_to_opaque = true> struct cuda_scatter_fill_base_kernel { template <typename func_t> void operator()( const Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const func_t& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(index) .build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_fill_base_kernel_func", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, self.numel(), f ); } ); } void operator()( const Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const ReduceMultiply& f ) { at::assert_no_internal_overlap(self); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(index) .build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_FLOATING_TYPES_AND2( 
at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cuda_scatter_fill_base_kernel_reduce_multiply", [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, self.numel(), f ); } ); } }; // struct cuda_scatter_fill_base_kernel void gather_cuda_kernel(const Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) { cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()( result, dim, index, self, "gather_out_cuda", tensor_assign); } void scatter_cuda_kernel(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { cuda_scatter_gather_base_kernel<>()( self, dim, index, src, "scatter_cuda_", tensor_assign); } void scatter_fill_cuda_kernel(const Tensor& self, int64_t dim, const Tensor& index, const Scalar& src) { cuda_scatter_fill_base_kernel<>()( self, dim, index, src, "scatter_fill_cuda_", tensor_assign); } void scatter_add_cuda_kernel(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("scatter_add_cuda_kernel"); cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()( self, dim, index, src, "scatter_add_cuda_", reduce_add); } void scatter_reduce_cuda_kernel(const Tensor& self, const int64_t dim, const Tensor& index, const Tensor& src, const SCATTER_GATHER_OP& reduce) { switch (reduce) { case SCATTER_GATHER_OP::REDUCE_ADD : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_add_", reduce_add); break; case SCATTER_GATHER_OP::REDUCE_MULTIPLY : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_multiply_", reduce_multiply); break; default : break; } } void scatter_reduce_two_cuda_kernel(const Tensor& self, const int64_t dim, const Tensor& index, const Tensor& src, const SCATTER_GATHER_OP& reduce) { globalContext().alertNotDeterministic("scatter_reduce_cuda"); switch (reduce) { case SCATTER_GATHER_OP::REDUCE_ADD : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_sum_", reduce_add); break; case SCATTER_GATHER_OP::REDUCE_MULTIPLY : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_prod_", reduce_multiply); break; case SCATTER_GATHER_OP::REDUCE_MAXIMUM : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_amax_", reduce_maximum); break; case SCATTER_GATHER_OP::REDUCE_MINIMUM : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_amin_", reduce_minimum); break; case SCATTER_GATHER_OP::REDUCE_MEAN : cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src, "scatter_reduce_cuda_mean_", reduce_mean); break; } } void scatter_scalar_reduce_cuda_kernel(const Tensor& self, const int64_t dim, const Tensor& index, const Scalar& value, const SCATTER_GATHER_OP& reduce) { switch (reduce) { case SCATTER_GATHER_OP::REDUCE_ADD : cuda_scatter_fill_base_kernel<false>()(self, dim, index, value, "scatter_fill_cuda_add_", reduce_add); break; case SCATTER_GATHER_OP::REDUCE_MULTIPLY : cuda_scatter_fill_base_kernel<false>()(self, dim, index, value, "scatter_fill_cuda_multiply_", reduce_multiply); break; default : break; } } 
REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel); REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel); REGISTER_DISPATCH(scatter_reduce_stub, &scatter_reduce_cuda_kernel); REGISTER_DISPATCH(scatter_scalar_reduce_stub, &scatter_scalar_reduce_cuda_kernel); REGISTER_DISPATCH(scatter_reduce_two_stub, &scatter_reduce_two_cuda_kernel); }} // namespace at::native
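// Editor's note: the scatter/gather pair above routes per-element functors (ReduceAdd,
// ReduceMultiply, TensorAssign, ...) through a TensorIterator-driven launcher.  The sketch
// below is a deliberately simplified, hypothetical scatter-add on raw 1-D buffers that shows
// only the core idea (atomic accumulation at index[i]); it is not the ATen implementation.
#include <cuda_runtime.h>

__global__ void scatter_add_1d(float* self, long self_len,
                               const long* index, const float* src, long src_len) {
    long i = blockIdx.x * (long)blockDim.x + threadIdx.x;
    if (i >= src_len) return;
    long idx = index[i];
    if (idx < 0 || idx >= self_len) return;   // ATen asserts "index out of bounds" instead
    atomicAdd(&self[idx], src[i]);            // atomics make the summation order nondeterministic,
                                              // hence the alertNotDeterministic() calls above
}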
f122505fbe9ce9dae4acc666d8cd3e89f07a1325.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

       @precisions mixed zc -> ds
*/
#include "common_magma.h"

#define num_threadzc 64

__global__ void
zclaswp_kernel(int n, hipDoubleComplex *a, int lda, cuFloatComplex *sa, int m, const magma_int_t *ipiv)
{
    int ind = blockIdx.x*num_threadzc + threadIdx.x;
    int newind;
    cuFloatComplex res;

    if (ind < m) {
        sa   += ind;
        ipiv += ind;

        newind = ipiv[0];

        for(int i=0; i<n; i++) {
            res = MAGMA_C_MAKE( (float)cuCreal(a[newind+i*lda]),
                                (float)cuCimag(a[newind+i*lda]) );
            sa[i*lda] = res;
        }
    }
}

__global__ void
zclaswp_inv_kernel(int n, hipDoubleComplex *a, int lda, cuFloatComplex *sa, int m, const magma_int_t *ipiv)
{
    int ind = blockIdx.x*num_threadzc + threadIdx.x;
    int newind;
    hipDoubleComplex res;

    if (ind < m) {
        a    += ind;
        ipiv += ind;

        newind = ipiv[0];

        for(int i=0; i<n; i++) {
            res = MAGMA_Z_MAKE( (double)cuCrealf(sa[newind+i*lda]),
                                (double)cuCimagf(sa[newind+i*lda]) );
            a[i*lda] = res;
        }
    }
}

extern "C" void
magmablas_zclaswp( magma_int_t n, hipDoubleComplex *a, magma_int_t lda,
                   cuFloatComplex *sa, magma_int_t m,
                   const magma_int_t *ipiv, magma_int_t incx )
{
/*  -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

    Purpose
    =======
    Row i of A is cast to single precision in row ipiv[i] of SA, 0<=i<m.

    N      - (input) INTEGER.
             On entry, N specifies the number of columns of the matrix A.

    A      - (input) DOUBLE PRECISION array on the GPU, dimension (LDA,N)
             On entry, the matrix of column dimension N and row dimension M
             to which the row interchanges will be applied.

    LDA    - (input) INTEGER.
             LDA specifies the leading dimension of A.

    SA     - (output) REAL array on the GPU, dimension (LDA,N)
             On exit, the matrix cast to single precision and permuted.

    M      - (input) The number of rows to be interchanged.

    IPIV   - (input) INTEGER array, dimension (M)
             The vector of pivot indices. Row i of A is cast to single
             precision in row ipiv[i] of SA, 0<=i<m.

    INCX   - (input) INTEGER
             If INCX is negative, the pivots are applied in reverse order,
             otherwise in straight-forward order.
    =====================================================================   */

    int blocks;
    if (m % num_threadzc==0)
        blocks = m/num_threadzc;
    else
        blocks = m/num_threadzc + 1;

    dim3 grid(blocks, 1, 1);
    dim3 threazc(num_threadzc, 1, 1);

    if (incx >=0)
        hipLaunchKernelGGL(( zclaswp_kernel), dim3(grid), dim3(threazc), 0, magma_stream , n, a, lda, sa, m, ipiv);
    else
        hipLaunchKernelGGL(( zclaswp_inv_kernel), dim3(grid), dim3(threazc), 0, magma_stream , n, a, lda, sa, m, ipiv);
}

#undef num_threadzc
f122505fbe9ce9dae4acc666d8cd3e89f07a1325.cu
/*
    -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

       @precisions mixed zc -> ds
*/
#include "common_magma.h"

#define num_threadzc 64

__global__ void
zclaswp_kernel(int n, cuDoubleComplex *a, int lda, cuFloatComplex *sa, int m, const magma_int_t *ipiv)
{
    int ind = blockIdx.x*num_threadzc + threadIdx.x;
    int newind;
    cuFloatComplex res;

    if (ind < m) {
        sa   += ind;
        ipiv += ind;

        newind = ipiv[0];

        for(int i=0; i<n; i++) {
            res = MAGMA_C_MAKE( (float)cuCreal(a[newind+i*lda]),
                                (float)cuCimag(a[newind+i*lda]) );
            sa[i*lda] = res;
        }
    }
}

__global__ void
zclaswp_inv_kernel(int n, cuDoubleComplex *a, int lda, cuFloatComplex *sa, int m, const magma_int_t *ipiv)
{
    int ind = blockIdx.x*num_threadzc + threadIdx.x;
    int newind;
    cuDoubleComplex res;

    if (ind < m) {
        a    += ind;
        ipiv += ind;

        newind = ipiv[0];

        for(int i=0; i<n; i++) {
            res = MAGMA_Z_MAKE( (double)cuCrealf(sa[newind+i*lda]),
                                (double)cuCimagf(sa[newind+i*lda]) );
            a[i*lda] = res;
        }
    }
}

extern "C" void
magmablas_zclaswp( magma_int_t n, cuDoubleComplex *a, magma_int_t lda,
                   cuFloatComplex *sa, magma_int_t m,
                   const magma_int_t *ipiv, magma_int_t incx )
{
/*  -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

    Purpose
    =======
    Row i of A is cast to single precision in row ipiv[i] of SA, 0<=i<m.

    N      - (input) INTEGER.
             On entry, N specifies the number of columns of the matrix A.

    A      - (input) DOUBLE PRECISION array on the GPU, dimension (LDA,N)
             On entry, the matrix of column dimension N and row dimension M
             to which the row interchanges will be applied.

    LDA    - (input) INTEGER.
             LDA specifies the leading dimension of A.

    SA     - (output) REAL array on the GPU, dimension (LDA,N)
             On exit, the matrix cast to single precision and permuted.

    M      - (input) The number of rows to be interchanged.

    IPIV   - (input) INTEGER array, dimension (M)
             The vector of pivot indices. Row i of A is cast to single
             precision in row ipiv[i] of SA, 0<=i<m.

    INCX   - (input) INTEGER
             If INCX is negative, the pivots are applied in reverse order,
             otherwise in straight-forward order.
    =====================================================================   */

    int blocks;
    if (m % num_threadzc==0)
        blocks = m/num_threadzc;
    else
        blocks = m/num_threadzc + 1;

    dim3 grid(blocks, 1, 1);
    dim3 threazc(num_threadzc, 1, 1);

    if (incx >=0)
        zclaswp_kernel<<< grid, threazc, 0, magma_stream >>>(n, a, lda, sa, m, ipiv);
    else
        zclaswp_inv_kernel<<< grid, threazc, 0, magma_stream >>>(n, a, lda, sa, m, ipiv);
}

#undef num_threadzc
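// Editor's note: the MAGMA pair above applies row pivots while down-casting double-precision
// rows to single precision (zclaswp_kernel) or casting back (zclaswp_inv_kernel).  Below is a
// minimal, hypothetical real-valued sketch of the forward direction; like the kernels above it
// assumes 0-based pivot indices and column-major storage with leading dimension lda.
#include <cuda_runtime.h>

__global__ void cast_and_permute_rows(int n, const double* A, int lda,
                                      float* SA, int m, const int* ipiv) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < m) {
        int src_row = ipiv[row];                 // row of A that lands in row `row` of SA
        for (int j = 0; j < n; ++j)
            SA[row + j * lda] = (float)A[src_row + j * lda];
    }
}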
f9a4000fe6a5cdf1a022e925240f6c0dc2a8b724.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <hip/hip_complex.h>

__global__ void rdiv_strided_float(int n, int xOffset, int yOffset,
                                   float *dx, float *dy,
                                   int incx, int incy, float *result) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if (i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
            result[i] = dx[i] / dy[i];
    }
}
f9a4000fe6a5cdf1a022e925240f6c0dc2a8b724.cu
extern "C" #include <cuComplex.h> __global__ void rdiv_strided_float(int n,int xOffset,int yOffset, float *dx,float *dy,int incx,int incy,float *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0) result[i] = dx[i] / dy[i]; } }
9f08152e75f4520b71e5a02fe4d139f834121746.hip
// !!! This is a file automatically generated by hipify!!!
#include <utilities/norm_inf.h>

#include <device/cuda_utils.h>
#include <device/device_defines.h>
#include <device/reduce.h>

#include <core/errors.h>

void norm_inf_host( real *dev, real *host, int len, real *res, real *idx, real *scratch)
{
    copy_host_device( host, dev, len * sizeof(real), hipMemcpyDeviceToHost, ERROR_DEBUG);

    *res = fabs( host[0] );
    *idx = 0;
    for (int i = 1; i < len; i ++) {
        if ( *res < fabs( host[ i ] ) ){
            *res = fabs( host[ i ] );
            *idx = i;
        }
    }
}

void norm_inf( real *dev, real *host, int len, real *res, real *idx, real *scratch)
{
    /*
    int blocks = len / (8 * BLOCK_SIZE) + (( len % (8 * BLOCK_SIZE)) == 0 ? 0 : 1 );
    kerNormInf <<< blocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(real) >>>
        ( dev, scratch, len);
    hipDeviceSynchronize ();
    cudaCheckError ();

    // Now find the greatest among the per-block results.
    kerNormInf <<< 1, blocks, BLOCK_SIZE * sizeof(real) >>>
        ( scratch, res, blocks );
    hipDeviceSynchronize ();
    cudaCheckError ();
    */

    copy_host_device( host, dev, len * sizeof(real), hipMemcpyDeviceToHost, ERROR_DEBUG);

    *res = fabs( host[0] );
    *idx = 0;
    for (int i = 1; i < len; i ++) {
        if ( *res < fabs( host[ i ] ) ){
            *res = fabs( host[ i ] );
            *idx = i;
        }
    }
}
9f08152e75f4520b71e5a02fe4d139f834121746.cu
#include <utilities/norm_inf.h>

#include <device/cuda_utils.h>
#include <device/device_defines.h>
#include <device/reduce.h>

#include <core/errors.h>

void norm_inf_host( real *dev, real *host, int len, real *res, real *idx, real *scratch)
{
    copy_host_device( host, dev, len * sizeof(real), cudaMemcpyDeviceToHost, ERROR_DEBUG);

    *res = fabs( host[0] );
    *idx = 0;
    for (int i = 1; i < len; i ++) {
        if ( *res < fabs( host[ i ] ) ){
            *res = fabs( host[ i ] );
            *idx = i;
        }
    }
}

void norm_inf( real *dev, real *host, int len, real *res, real *idx, real *scratch)
{
    /*
    int blocks = len / (8 * BLOCK_SIZE) + (( len % (8 * BLOCK_SIZE)) == 0 ? 0 : 1 );
    kerNormInf <<< blocks, BLOCK_SIZE, BLOCK_SIZE * sizeof(real) >>>
        ( dev, scratch, len);
    cudaThreadSynchronize ();
    cudaCheckError ();

    // Now find the greatest among the per-block results.
    kerNormInf <<< 1, blocks, BLOCK_SIZE * sizeof(real) >>>
        ( scratch, res, blocks );
    cudaThreadSynchronize ();
    cudaCheckError ();
    */

    copy_host_device( host, dev, len * sizeof(real), cudaMemcpyDeviceToHost, ERROR_DEBUG);

    *res = fabs( host[0] );
    *idx = 0;
    for (int i = 1; i < len; i ++) {
        if ( *res < fabs( host[ i ] ) ){
            *res = fabs( host[ i ] );
            *idx = i;
        }
    }
}
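// Editor's note: both versions of norm_inf above fall back to copying the vector to the host
// and scanning for the largest magnitude (the block-wise device reduction is commented out).
// The helper below is a self-contained, hypothetical equivalent using plain CUDA runtime calls
// and double precision; it assumes len >= 1.
#include <cuda_runtime.h>
#include <cmath>
#include <vector>

// Returns max |d_vec[i]| and writes the index of that element to *argmax.
double norm_inf_via_host(const double* d_vec, int len, int* argmax) {
    std::vector<double> h(len);
    cudaMemcpy(h.data(), d_vec, len * sizeof(double), cudaMemcpyDeviceToHost);
    double best = std::fabs(h[0]);
    *argmax = 0;
    for (int i = 1; i < len; ++i) {
        double v = std::fabs(h[i]);
        if (v > best) { best = v; *argmax = i; }
    }
    return best;
}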
summarystatsreduce.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <pointercast.h> #include <types/types.h> #include <types/float16.h> #include <op_boilerplate.h> #include <loops/summarystatsreduce.h> #include <helpers/shape.h> #include <helpers/TAD.h> #include <dll.h> #include <Environment.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helpers/DebugHelper.h> #include <specials_cuda.h> using namespace simdOps; namespace functions { namespace summarystats { template <typename X, typename Z> void _CUDA_G summaryStatsReduceT(int op, void *dx, Nd4jLong *xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong *zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets); } /** * * @param sPartialsRef * @param tid * @param extraParams */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto extraParams = static_cast<Z*>(vextraParams); SummaryStatsData<X> *sPartials = *sPartialsRef; Nd4jLong floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { SummaryStatsData<X> prev = sPartials[tid - floorPow2]; SummaryStatsData<X> curr = sPartials[tid]; sPartials[tid - floorPow2] = update(prev, curr, extraParams); } __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numElements) { SummaryStatsData<X> curr = sPartials[tid]; SummaryStatsData<X> next = sPartials[tid + activeThreads]; sPartials[tid] = update(curr, next, extraParams); } __syncthreads(); } }; /** * @param n n is the number of * elements to loop through * @param dx the data to operate on * @param xVectorInfo the meta data for the vector: * 0 is the offset * 1 is the increment/stride * 2 is the real length of the buffer (n and dx.length won't always be the same) * 3 is the element wise stride for the buffer * 4 is the number of elements it takes to get to the next row/column/tensor * @param gpuInformation * 0 is the block size * 1 is the grid size * 2 is the shared memory size * @param problemDefinition * 0 is the number of elements per vector * 1 is the number of vectors */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::transform(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto dx = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); auto reductionBuffer = static_cast<Z*>(vreductionBuffer); int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ volatile int resultScalar; __shared__ int xElementWiseStride; int numElements = blockDim.x; //shared memory space for storing intermediate results __shared__ SummaryStatsData<X> *sPartials; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<SummaryStatsData<X>*>(shmem); } __syncthreads(); Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; //length for the tad __shared__ volatile int xLength; __shared__ volatile int resultLength; SummaryStatsData<X> reduction; reduction.initWithValue(0.0); reduction.n = 0; if (threadIdx.x == 0) { if (zShapeInfo != nullptr) resultLength = shape::length(zShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION)) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; auto xStride = shape::stride(xShapeInfo); auto xOrder = shape::order(xShapeInfo); if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) { xElementWiseStride = xStride[dimension[0]]; } else { xElementWiseStride = shape::elementWiseStride(xShapeInfo); } xLength = shape::length(xShapeInfo); } __syncthreads(); if (!resultScalar) { __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / 
tadLength; } __syncthreads(); if (tadEWS == 0) { for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto tadOffsetForBlock = tadOffsets[r]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[xOffset]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); } __syncthreads(); } } else { for (int i = blockIdx.x; i < numTads; i += gridDim.x) { auto tadOffsetForBlock = tadOffsets[i]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int x = threadIdx.x; x < tadLength; x += blockDim.x) { auto indexX = tadOffsetForBlock + x * tadEWS; SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[indexX]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams); } } } } else if (resultScalar) { __shared__ int n; if (threadIdx.x == 0) { xElementWiseStride = shape::elementWiseStride(xShapeInfo); n = shape::length(xShapeInfo); } __syncthreads(); if (xElementWiseStride >= 1) { for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) { SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[i * xElementWiseStride]); reduction = update(reduction, indexVal2, extraParams); } } else { for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) { auto offset = shape::getIndexOffset(i, xShapeInfo); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[offset]); reduction = update(reduction, indexVal2, extraParams); } } sPartials[threadIdx.x] = reduction; __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams); __syncthreads(); if (gridDim.x > 1) { __shared__ bool amLast; unsigned int *tc = (unsigned int *)reductionBuffer; tid = threadIdx.x; if (threadIdx.x == 0) { SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer; pBuffer[blockIdx.x] = sPartials[0]; } __threadfence(); __syncthreads(); if (tid == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer; Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams); __syncthreads(); if (tid == 0) { z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } else { if (tid == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; z[0] = z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } }; template <typename X, typename Y> _CUDA_D void 
SummaryStatsReduce<X,Y>::transform(const int opNum, void *dx, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS); }; template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto extraParams = static_cast<Z*>(vextraParams); auto z = reinterpret_cast<Z*>(vz); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); // this is blocking method since method should return scalar nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed"); } template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( summaryStatsReduceT<X, Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), dimension, dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } BUILD_DOUBLE_TEMPLATE(template 
class ND4J_EXPORT SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES); } }
summarystatsreduce.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <pointercast.h> #include <types/types.h> #include <types/float16.h> #include <op_boilerplate.h> #include <loops/summarystatsreduce.h> #include <helpers/shape.h> #include <helpers/TAD.h> #include <dll.h> #include <Environment.h> #include <cuda.h> #include <cuda_runtime.h> #include <helpers/DebugHelper.h> #include <specials_cuda.h> using namespace simdOps; namespace functions { namespace summarystats { template <typename X, typename Z> void _CUDA_G summaryStatsReduceT(int op, void *dx, Nd4jLong *xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong *zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets); } /** * * @param sPartialsRef * @param tid * @param extraParams */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto extraParams = static_cast<Z*>(vextraParams); SummaryStatsData<X> *sPartials = *sPartialsRef; Nd4jLong floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { SummaryStatsData<X> prev = sPartials[tid - floorPow2]; SummaryStatsData<X> curr = sPartials[tid]; sPartials[tid - floorPow2] = update(prev, curr, extraParams); } __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numElements) { SummaryStatsData<X> curr = sPartials[tid]; SummaryStatsData<X> next = sPartials[tid + activeThreads]; sPartials[tid] = update(curr, next, extraParams); } __syncthreads(); } }; /** * @param n n is the number of * elements to loop through * @param dx the data to operate on * @param xVectorInfo the meta data for the vector: * 0 is the offset * 1 is the increment/stride * 2 is the real length of the buffer (n and dx.length won't always be the same) * 3 is the element wise stride for the buffer * 4 is the number of elements it takes to get to the next row/column/tensor * @param gpuInformation * 0 is the block size * 1 is the grid size * 2 is the shared memory size * @param problemDefinition * 0 is the number of elements per vector * 1 is the number of vectors */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::transform(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto dx = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); auto reductionBuffer = static_cast<Z*>(vreductionBuffer); int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ volatile int resultScalar; __shared__ int xElementWiseStride; int numElements = blockDim.x; //shared memory space for storing intermediate results __shared__ SummaryStatsData<X> *sPartials; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<SummaryStatsData<X>*>(shmem); } __syncthreads(); Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; //length for the tad __shared__ volatile int xLength; __shared__ volatile int resultLength; SummaryStatsData<X> reduction; reduction.initWithValue(0.0); reduction.n = 0; if (threadIdx.x == 0) { if (zShapeInfo != nullptr) resultLength = shape::length(zShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION)) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; auto xStride = shape::stride(xShapeInfo); auto xOrder = shape::order(xShapeInfo); if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) { xElementWiseStride = xStride[dimension[0]]; } else { xElementWiseStride = shape::elementWiseStride(xShapeInfo); } xLength = shape::length(xShapeInfo); } __syncthreads(); if (!resultScalar) { __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / 
tadLength; } __syncthreads(); if (tadEWS == 0) { for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto tadOffsetForBlock = tadOffsets[r]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[xOffset]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); } __syncthreads(); } } else { for (int i = blockIdx.x; i < numTads; i += gridDim.x) { auto tadOffsetForBlock = tadOffsets[i]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int x = threadIdx.x; x < tadLength; x += blockDim.x) { auto indexX = tadOffsetForBlock + x * tadEWS; SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[indexX]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams); } } } } else if (resultScalar) { __shared__ int n; if (threadIdx.x == 0) { xElementWiseStride = shape::elementWiseStride(xShapeInfo); n = shape::length(xShapeInfo); } __syncthreads(); if (xElementWiseStride >= 1) { for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) { SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[i * xElementWiseStride]); reduction = update(reduction, indexVal2, extraParams); } } else { for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) { auto offset = shape::getIndexOffset(i, xShapeInfo); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[offset]); reduction = update(reduction, indexVal2, extraParams); } } sPartials[threadIdx.x] = reduction; __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams); __syncthreads(); if (gridDim.x > 1) { __shared__ bool amLast; unsigned int *tc = (unsigned int *)reductionBuffer; tid = threadIdx.x; if (threadIdx.x == 0) { SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer; pBuffer[blockIdx.x] = sPartials[0]; } __threadfence(); __syncthreads(); if (tid == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer; Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams); __syncthreads(); if (tid == 0) { z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } else { if (tid == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; z[0] = z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } }; template <typename X, typename Y> _CUDA_D void 
SummaryStatsReduce<X,Y>::transform(const int opNum, void *dx, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS); }; template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto extraParams = static_cast<Z*>(vextraParams); auto z = reinterpret_cast<Z*>(vz); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum); summaryStatsReduceT<X,Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); // this is blocking method since method should return scalar nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed"); } template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); summaryStatsReduceT<X,Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum); summaryStatsReduceT<X, Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), dimension, dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES); } }
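The aggregatePartials routine in both versions above handles non-power-of-two block sizes by first folding the excess threads onto the largest power of two not exceeding blockDim.x, then running an ordinary tree reduction. A minimal standalone sketch of that pattern (a hypothetical sum kernel written for illustration, not part of libnd4j) looks roughly like this:

// Hypothetical sketch of the "fold to a power of two, then tree-reduce" pattern
// used by aggregatePartials above.
__global__ void blockSum(const float *in, float *out, int n) {
    extern __shared__ float sdata[];              // one slot per thread in the block
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n) ? in[i] : 0.0f;
    __syncthreads();

    // Largest power of two <= blockDim.x.
    int floorPow2 = blockDim.x;
    while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1;

    // Fold the "remainder" threads onto the first floorPow2 slots.
    if (tid >= floorPow2) sdata[tid - floorPow2] += sdata[tid];
    __syncthreads();

    // Ordinary tree reduction over a power-of-two range.
    for (int s = floorPow2 >> 1; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = sdata[0];
}
// Launch example: blockSum<<<grid, block, block * sizeof(float)>>>(in, out, n);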
b84227af845eb255f5ecf99e830a68d26bc2436e.hip
// !!! This is a file automatically generated by hipify!!!
/////////////////////////////////////////////////////////////////////////
// Parallel Computing Assignment 3
// Chris Jimenez
// 5/1/14
// This CUDA program finds the max integer in an array of random integers.
// This program DOES NOT use shared memory and DOES NOT take thread
// divergence into consideration.
//
/////////////////////////////////////////////////////////////////////////
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// define number of integers...
#define NUM_OF_INTEGERS 65536

// define max integer
#define MAX 100000

///////////////////////////////////
/* The following is dependent on whatever GPU this program is running on:
   if running on the NYU GPUs, the max threads per block is 512.
   Running on an NVIDIA GeForce GT 650M (personal machine),
   the max threads per block is 1024. */
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS NUM_OF_INTEGERS/THREADS_PER_BLOCK

/****** Function declarations */
void fill_array();
__global__ void get_max(int *array);
/********************************/

/////////////////////////////////////////////////////////
/*******************************************************/
/* Function fills the given array a with random integers */
void fill_array(int *a){
    int i;
    time_t t;

    /* Initializes random number generator */
    srand((unsigned) time(&t));

    for(i = 0; i < NUM_OF_INTEGERS; i++){
        a[i] = random() % MAX;
    }
}

/*******************************************************/
/* Kernel function finds the max integer in the given array
   by using a reduction technique. Ultimately, the largest
   value will be located at the 0th position of the array. */
__global__ void get_max(int *array){
    int temp;
    int index = threadIdx.x + (blockDim.x * blockIdx.x);
    int nTotalThreads = NUM_OF_INTEGERS;

    while(nTotalThreads > 1){
        int halfPoint = nTotalThreads / 2;   // divide by two
        // only the first half of the threads will be active.
        if (index < halfPoint){
            temp = array[ index + halfPoint ];
            if (temp > array[ index ]) {
                array[index] = temp;
            }
        }
        __syncthreads();

        nTotalThreads = nTotalThreads / 2;   // divide by two.
    }
}

/*******************************************************/
// Main function.....
int main(int argc, char *argv[]){
    int *h_array;   // array of random integers....
    int *d_array;   // device copy...

    printf("Initializing data...\n");

    // allocate space for the array on the host
    h_array = (int *) malloc(NUM_OF_INTEGERS * sizeof(int));

    // fill in random array
    fill_array(h_array);

    // allocate space for the array and result max on the device
    hipMalloc( (void **)&d_array, sizeof(int) * NUM_OF_INTEGERS );

    // Copy array from host to device...
    hipMemcpy(d_array, h_array, sizeof(int) * NUM_OF_INTEGERS, hipMemcpyHostToDevice);

    // call kernel!
    hipLaunchKernelGGL(( get_max), dim3(NUM_BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, d_array);

    // Copy array from device to host...
    hipMemcpy(h_array, d_array, sizeof(int) * NUM_OF_INTEGERS, hipMemcpyDeviceToHost);

    // print max value...
    printf("The max integer in the array is: %d\n", h_array[0]);

    printf("Cleaning up...\n");
    free(h_array);
    hipFree(d_array);

    return 0;
}
b84227af845eb255f5ecf99e830a68d26bc2436e.cu
/////////////////////////////////////////////////////////////////////////
// Parallel Computing Assignment 3
// Chris Jimenez
// 5/1/14
// This CUDA program finds the max integer in an array of random integers.
// This program DOES NOT use shared memory and DOES NOT take thread
// divergence into consideration.
//
/////////////////////////////////////////////////////////////////////////
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// define number of integers...
#define NUM_OF_INTEGERS 65536

// define max integer
#define MAX 100000

///////////////////////////////////
/* The following is dependent on whatever GPU this program is running on:
   if running on the NYU GPUs, the max threads per block is 512.
   Running on an NVIDIA GeForce GT 650M (personal machine),
   the max threads per block is 1024. */
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS NUM_OF_INTEGERS/THREADS_PER_BLOCK

/****** Function declarations */
void fill_array();
__global__ void get_max(int *array);
/********************************/

/////////////////////////////////////////////////////////
/*******************************************************/
/* Function fills the given array a with random integers */
void fill_array(int *a){
    int i;
    time_t t;

    /* Initializes random number generator */
    srand((unsigned) time(&t));

    for(i = 0; i < NUM_OF_INTEGERS; i++){
        a[i] = random() % MAX;
    }
}

/*******************************************************/
/* Kernel function finds the max integer in the given array
   by using a reduction technique. Ultimately, the largest
   value will be located at the 0th position of the array. */
__global__ void get_max(int *array){
    int temp;
    int index = threadIdx.x + (blockDim.x * blockIdx.x);
    int nTotalThreads = NUM_OF_INTEGERS;

    while(nTotalThreads > 1){
        int halfPoint = nTotalThreads / 2;   // divide by two
        // only the first half of the threads will be active.
        if (index < halfPoint){
            temp = array[ index + halfPoint ];
            if (temp > array[ index ]) {
                array[index] = temp;
            }
        }
        __syncthreads();

        nTotalThreads = nTotalThreads / 2;   // divide by two.
    }
}

/*******************************************************/
// Main function.....
int main(int argc, char *argv[]){
    int *h_array;   // array of random integers....
    int *d_array;   // device copy...

    printf("Initializing data...\n");

    // allocate space for the array on the host
    h_array = (int *) malloc(NUM_OF_INTEGERS * sizeof(int));

    // fill in random array
    fill_array(h_array);

    // allocate space for the array and result max on the device
    cudaMalloc( (void **)&d_array, sizeof(int) * NUM_OF_INTEGERS );

    // Copy array from host to device...
    cudaMemcpy(d_array, h_array, sizeof(int) * NUM_OF_INTEGERS, cudaMemcpyHostToDevice);

    // call kernel!
    get_max<<<NUM_BLOCKS,THREADS_PER_BLOCK>>>(d_array);

    // Copy array from device to host...
    cudaMemcpy(h_array, d_array, sizeof(int) * NUM_OF_INTEGERS, cudaMemcpyDeviceToHost);

    // print max value...
    printf("The max integer in the array is: %d\n", h_array[0]);

    printf("Cleaning up...\n");
    free(h_array);
    cudaFree(d_array);

    return 0;
}
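Note that get_max above performs a grid-wide tree reduction guarded only by __syncthreads(), which synchronizes threads within a single block; after the first halving step each fold reads elements written by threads in other blocks, so with 65536 elements and 512-thread blocks the result can depend on block scheduling. A common alternative, sketched here as a hypothetical variant rather than the assignment's code, reduces within each block in shared memory and lets the host (or a second kernel pass) combine the per-block maxima:

#include <limits.h>   // for INT_MIN

// One block reduces its THREADS_PER_BLOCK elements to a single maximum.
// Assumes blockDim.x == THREADS_PER_BLOCK and that it is a power of two.
__global__ void block_max(const int *in, int *block_results, int n) {
    __shared__ int sdata[THREADS_PER_BLOCK];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n) ? in[i] : INT_MIN;
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s && sdata[tid + s] > sdata[tid])
            sdata[tid] = sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) block_results[blockIdx.x] = sdata[0];
}
// The host then copies back NUM_BLOCKS partial results and takes their maximum.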
f85e28c4a9e0dc0ed9c72f4119fd0db977aa0ad3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 10000 int arr[N]; int main() { srand(time(NULL)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); for(int i = 0; i < N; i++){ arr[i] = rand() % 1000; } hipEventRecord(start); for (unsigned int s=1; s < N; s *= 2) { for (unsigned int i=0; i < N; i++) { if (i + s < N) { arr[i] = arr[i + s] > arr[i] ? arr[i + s] : arr[i]; } } } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("time : %f\n", milliseconds); printf("%d\n", arr[0]); return 0; }
f85e28c4a9e0dc0ed9c72f4119fd0db977aa0ad3.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #define N 10000 int arr[N]; int main() { srand(time(NULL)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); for(int i = 0; i < N; i++){ arr[i] = rand() % 1000; } cudaEventRecord(start); for (unsigned int s=1; s < N; s *= 2) { for (unsigned int i=0; i < N; i++) { if (i + s < N) { arr[i] = arr[i + s] > arr[i] ? arr[i + s] : arr[i]; } } } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("time : %f\n", milliseconds); printf("%d\n", arr[0]); return 0; }
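Both versions of this benchmark time a purely host-side loop: the events are recorded on the default stream with no device work between them, so the measured interval is essentially the CPU time spent between the two record calls. A host clock is the more direct instrument for that; a minimal sketch of the same measurement with std::chrono (assuming the same arr and N definitions as above) would be:

#include <chrono>

// ... inside main(), around the reduction loop:
auto t0 = std::chrono::steady_clock::now();
for (unsigned int s = 1; s < N; s *= 2) {
    for (unsigned int i = 0; i + s < N; i++) {
        arr[i] = arr[i + s] > arr[i] ? arr[i + s] : arr[i];
    }
}
auto t1 = std::chrono::steady_clock::now();
double milliseconds = std::chrono::duration<double, std::milli>(t1 - t0).count();
printf("time : %f\n", milliseconds);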
1ed153ff84f7e9f2c48fa8656608d668fd7aa71f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hipfft.h> const int DEFAULT_SIGNAL_LENGTH = 4096; const int DEFAULT_FFT_TRIALS = 10000; const int DEFAULT_META_TRIALS = 10; const int BATCH_SIZE = 1; int main(int argc, char **argv) { int fft_trials = DEFAULT_FFT_TRIALS; int meta_trials = DEFAULT_META_TRIALS; printf("[INFO] META trials: %d\n", meta_trials); printf("[INFO] FFT trials: %d\n", fft_trials); long signal_length = DEFAULT_SIGNAL_LENGTH; printf("[INFO] Signal Length: %ld\n", signal_length); hipfftComplex *h_original_signal; hipHostMalloc((void **) &h_original_signal, sizeof(hipfftComplex) * signal_length); hipfftComplex *d_original_signal, *d_applied_fft_signal; hipMalloc((void **) &d_original_signal, sizeof(hipfftComplex) * signal_length); hipMalloc((void **) &d_applied_fft_signal, sizeof(hipfftComplex) * signal_length); /* * generate random signal as original signal */ srand(0); // initialize random seed for (int i = 0; i < signal_length; i++) { h_original_signal[i].x = (float)rand() / RAND_MAX; h_original_signal[i].y = 0.0; } hipMemcpy(d_original_signal, h_original_signal, sizeof(hipfftComplex) * signal_length, hipMemcpyHostToDevice); hipfftHandle fft_plan; hipfftPlan1d(&fft_plan, signal_length, HIPFFT_C2C, BATCH_SIZE); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float sum_of_elapsed_times = 0.0; printf("[INFO] Run benchmark...\n"); for (int i = 0; i < meta_trials; i++) { hipEventRecord(start, 0); for (int j = 0; j < fft_trials; j++) { hipfftExecC2C(fft_plan, d_original_signal, d_applied_fft_signal, HIPFFT_FORWARD); } hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsed_time_ms; hipEventElapsedTime(&elapsed_time_ms, start, stop); float elapsed_time_sec = elapsed_time_ms / 1000.0; sum_of_elapsed_times += elapsed_time_sec; printf("%f sec\n", elapsed_time_sec); } printf("[INFO] Finished!\n"); printf("[INFO] Average: %lf sec\n", sum_of_elapsed_times / meta_trials); hipEventDestroy(start); hipEventDestroy(stop); }
1ed153ff84f7e9f2c48fa8656608d668fd7aa71f.cu
#include <stdio.h> #include <stdlib.h> #include <cufft.h> const int DEFAULT_SIGNAL_LENGTH = 4096; const int DEFAULT_FFT_TRIALS = 10000; const int DEFAULT_META_TRIALS = 10; const int BATCH_SIZE = 1; int main(int argc, char **argv) { int fft_trials = DEFAULT_FFT_TRIALS; int meta_trials = DEFAULT_META_TRIALS; printf("[INFO] META trials: %d\n", meta_trials); printf("[INFO] FFT trials: %d\n", fft_trials); long signal_length = DEFAULT_SIGNAL_LENGTH; printf("[INFO] Signal Length: %ld\n", signal_length); cufftComplex *h_original_signal; cudaMallocHost((void **) &h_original_signal, sizeof(cufftComplex) * signal_length); cufftComplex *d_original_signal, *d_applied_fft_signal; cudaMalloc((void **) &d_original_signal, sizeof(cufftComplex) * signal_length); cudaMalloc((void **) &d_applied_fft_signal, sizeof(cufftComplex) * signal_length); /* * generate random signal as original signal */ srand(0); // initialize random seed for (int i = 0; i < signal_length; i++) { h_original_signal[i].x = (float)rand() / RAND_MAX; h_original_signal[i].y = 0.0; } cudaMemcpy(d_original_signal, h_original_signal, sizeof(cufftComplex) * signal_length, cudaMemcpyHostToDevice); cufftHandle fft_plan; cufftPlan1d(&fft_plan, signal_length, CUFFT_C2C, BATCH_SIZE); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float sum_of_elapsed_times = 0.0; printf("[INFO] Run benchmark...\n"); for (int i = 0; i < meta_trials; i++) { cudaEventRecord(start, 0); for (int j = 0; j < fft_trials; j++) { cufftExecC2C(fft_plan, d_original_signal, d_applied_fft_signal, CUFFT_FORWARD); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsed_time_ms; cudaEventElapsedTime(&elapsed_time_ms, start, stop); float elapsed_time_sec = elapsed_time_ms / 1000.0; sum_of_elapsed_times += elapsed_time_sec; printf("%f sec\n", elapsed_time_sec); } printf("[INFO] Finished!\n"); printf("[INFO] Average: %lf sec\n", sum_of_elapsed_times / meta_trials); cudaEventDestroy(start); cudaEventDestroy(stop); }
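One small omission in both versions of the FFT benchmark: the plan and the buffers are never released. Since cufftExecC2C launches asynchronously, the event synchronization above is what makes the per-trial timings meaningful; the teardown the program skips would, in the CUDA version, look like this short sketch (the HIP version would use hipfftDestroy, hipFree, and hipHostFree):

// Teardown matching the allocations made earlier in main().
cufftDestroy(fft_plan);
cudaFree(d_applied_fft_signal);
cudaFree(d_original_signal);
cudaFreeHost(h_original_signal);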
64cf33e28c0afcbc29134f1f9a83ce95e3b520cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2021 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/device_helper.h" #include "saiga/cuda/event.h" #include "saiga/cuda/pinned_vector.h" #include "saiga/cuda/stream.h" #include "saiga/util/math.h" #include "saiga/cuda/tests/test_helper.h" using Saiga::ArrayView; using Saiga::CUDA::ThreadInfo; /** * Solution of the following problem on the GPU: * * for(int i = 0; i < 100; ++i) * { * if(compute(i) > threshold) * break; * } * * * Problem: * - The result of compute(i) is stored in device memory * - The CPU has to see it to decide if we can break early from this loop * * 3 Different Solution are given here: * * 1. Synchronous memcpy after compute in every step * 2. Asynchronous memcpy and sync on earlier iteration * -> The loop is terminated a few iterations too late, but no host-device sync is required * 3. The loop is moved to a kernel with dynamic parallelism * * * Output: * * Name Time (ms) Bandwidth (GB/s) * 1. Sync memcpy 2.59715 0.0252338 * 2. Async Streams 1.72704 0.037947 * 3. Dynamic Parallelism 2.01776 0.0324796 * * */ // Ressources: // https://devblogs.nvidia.com/introduction-cuda-dynamic-parallelism/ // https://devblogs.nvidia.com/cuda-dynamic-parallelism-api-principles/ template <int K> class SAIGA_ALIGN(16) Element { public: vec4 data = vec4(1.1); HD inline void operator()() { for (int k = 0; k < K; ++k) { data = data * data + data; } } }; template <typename T> __global__ static void process(ArrayView<T> data, ArrayView<float> residual, int it) { ThreadInfo<> ti; if (ti.thread_id >= data.size()) return; T e = data[ti.thread_id]; e(); data[ti.thread_id] = e; if(ti.thread_id == 0) residual[it] = it; } template<typename T> static void __global__ earlyExitDP(ArrayView<T> data, ArrayView<float> residual, int maxIts, int earlyExitIterations) { auto N = data.size(); for(int i = 0;i < maxIts; ++i) { process<T><<<THREAD_BLOCK(N,128)>>>(data,residual,i); hipDeviceSynchronize(); auto res = residual[i]; if(res > earlyExitIterations) break; } } int main() { int N = 2 * 1024; int maxIts = 100; using T = Element<16*512>; Saiga::thrust::pinned_vector<T> h_data(N); thrust::device_vector<T> d_data(N); Saiga::thrust::pinned_vector<float> h_res(maxIts); thrust::device_vector<float> d_res(maxIts,0); Saiga::CUDA::PerformanceTestHelper test("DP",N*2*sizeof(T)); float earlyExitIterations = 50; int testIts = 15; #if 0 { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { process<T><<<THREAD_BLOCK(N,128)>>>(d_data,d_res,0); }); test.addMeassurement("single iteration", st.median); CUDA_SYNC_CHECK_ERROR(); } { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { for(int i = 0;i < maxIts; ++i) { process<T><<<THREAD_BLOCK(N,128)>>>(d_data,d_res,i); } }); test.addMeassurement("100 iterations", st.median); CUDA_SYNC_CHECK_ERROR(); } { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { for(int i = 0;i < earlyExitIterations; ++i) { process<T><<<THREAD_BLOCK(N,128)>>>(d_data,d_res,i); } }); test.addMeassurement("early exit", st.median); CUDA_SYNC_CHECK_ERROR(); } #endif { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { for(int i = 
0;i < maxIts; ++i) { process<T><<<THREAD_BLOCK(N,128)>>>(d_data,d_res,i); hipMemcpy(h_res.data()+i,d_res.data().get()+i,4,hipMemcpyDeviceToHost); auto res = h_res[i]; if(res > earlyExitIterations) break; } }); test.addMeassurement("1. Sync memcpy", st.median); CUDA_SYNC_CHECK_ERROR(); } { thrust::fill(d_res.begin(),d_res.end(),0); Saiga::CUDA::CudaStream strm; Saiga::CUDA::CudaStream cpystrm; int tileSize = 4; int numTiles = maxIts / tileSize; std::vector<Saiga::CUDA::CudaEvent> events(numTiles); std::vector<Saiga::CUDA::CudaEvent> events2(numTiles); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { for(int t= 0; t < numTiles; ++t) { if(t > 1) { // Wait on previous tile // Check residual and break events[t-2].synchronize(); int lastFromPreviousTile = (t-2) * tileSize + tileSize-1; if(h_res[lastFromPreviousTile] > earlyExitIterations) { break; } } // Queue next tile for(int i = 0; i < tileSize; ++i) { int it = t * tileSize + i; hipLaunchKernelGGL(( process<T>), dim3(THREAD_BLOCK(N,128)),dim3(0),strm, 0, d_data,d_res,it); } events2[t].record(strm); cpystrm.waitForEvent(events2[t]); int lastFromCurrentTile = t * tileSize + tileSize - 1; hipMemcpyAsync(h_res.data()+lastFromCurrentTile,d_res.data().get()+lastFromCurrentTile,4,hipMemcpyDeviceToHost,cpystrm); events[t].record(cpystrm); } }); test.addMeassurement("2. Async Streams", st.median); CUDA_SYNC_CHECK_ERROR(); } { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { hipLaunchKernelGGL(( earlyExitDP<T>), dim3(1),dim3(1), 0, 0, d_data,d_res,maxIts,earlyExitIterations); }); test.addMeassurement("3. Dynamic Parallelism", st.median); CUDA_SYNC_CHECK_ERROR(); } hipDeviceSynchronize(); cout << "Done." << endl; return 0; }
64cf33e28c0afcbc29134f1f9a83ce95e3b520cb.cu
/** * Copyright (c) 2021 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/device_helper.h" #include "saiga/cuda/event.h" #include "saiga/cuda/pinned_vector.h" #include "saiga/cuda/stream.h" #include "saiga/util/math.h" #include "saiga/cuda/tests/test_helper.h" using Saiga::ArrayView; using Saiga::CUDA::ThreadInfo; /** * Solution of the following problem on the GPU: * * for(int i = 0; i < 100; ++i) * { * if(compute(i) > threshold) * break; * } * * * Problem: * - The result of compute(i) is stored in device memory * - The CPU has to see it to decide if we can break early from this loop * * 3 Different Solution are given here: * * 1. Synchronous memcpy after compute in every step * 2. Asynchronous memcpy and sync on earlier iteration * -> The loop is terminated a few iterations too late, but no host-device sync is required * 3. The loop is moved to a kernel with dynamic parallelism * * * Output: * * Name Time (ms) Bandwidth (GB/s) * 1. Sync memcpy 2.59715 0.0252338 * 2. Async Streams 1.72704 0.037947 * 3. Dynamic Parallelism 2.01776 0.0324796 * * */ // Ressources: // https://devblogs.nvidia.com/introduction-cuda-dynamic-parallelism/ // https://devblogs.nvidia.com/cuda-dynamic-parallelism-api-principles/ template <int K> class SAIGA_ALIGN(16) Element { public: vec4 data = vec4(1.1); HD inline void operator()() { for (int k = 0; k < K; ++k) { data = data * data + data; } } }; template <typename T> __global__ static void process(ArrayView<T> data, ArrayView<float> residual, int it) { ThreadInfo<> ti; if (ti.thread_id >= data.size()) return; T e = data[ti.thread_id]; e(); data[ti.thread_id] = e; if(ti.thread_id == 0) residual[it] = it; } template<typename T> static void __global__ earlyExitDP(ArrayView<T> data, ArrayView<float> residual, int maxIts, int earlyExitIterations) { auto N = data.size(); for(int i = 0;i < maxIts; ++i) { process<T><<<THREAD_BLOCK(N,128)>>>(data,residual,i); cudaDeviceSynchronize(); auto res = residual[i]; if(res > earlyExitIterations) break; } } int main() { int N = 2 * 1024; int maxIts = 100; using T = Element<16*512>; Saiga::thrust::pinned_vector<T> h_data(N); thrust::device_vector<T> d_data(N); Saiga::thrust::pinned_vector<float> h_res(maxIts); thrust::device_vector<float> d_res(maxIts,0); Saiga::CUDA::PerformanceTestHelper test("DP",N*2*sizeof(T)); float earlyExitIterations = 50; int testIts = 15; #if 0 { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { process<T><<<THREAD_BLOCK(N,128)>>>(d_data,d_res,0); }); test.addMeassurement("single iteration", st.median); CUDA_SYNC_CHECK_ERROR(); } { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { for(int i = 0;i < maxIts; ++i) { process<T><<<THREAD_BLOCK(N,128)>>>(d_data,d_res,i); } }); test.addMeassurement("100 iterations", st.median); CUDA_SYNC_CHECK_ERROR(); } { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { for(int i = 0;i < earlyExitIterations; ++i) { process<T><<<THREAD_BLOCK(N,128)>>>(d_data,d_res,i); } }); test.addMeassurement("early exit", st.median); CUDA_SYNC_CHECK_ERROR(); } #endif { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { for(int i = 0;i < maxIts; ++i) { process<T><<<THREAD_BLOCK(N,128)>>>(d_data,d_res,i); 
cudaMemcpy(h_res.data()+i,d_res.data().get()+i,4,cudaMemcpyDeviceToHost); auto res = h_res[i]; if(res > earlyExitIterations) break; } }); test.addMeassurement("1. Sync memcpy", st.median); CUDA_SYNC_CHECK_ERROR(); } { thrust::fill(d_res.begin(),d_res.end(),0); Saiga::CUDA::CudaStream strm; Saiga::CUDA::CudaStream cpystrm; int tileSize = 4; int numTiles = maxIts / tileSize; std::vector<Saiga::CUDA::CudaEvent> events(numTiles); std::vector<Saiga::CUDA::CudaEvent> events2(numTiles); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { for(int t= 0; t < numTiles; ++t) { if(t > 1) { // Wait on previous tile // Check residual and break events[t-2].synchronize(); int lastFromPreviousTile = (t-2) * tileSize + tileSize-1; if(h_res[lastFromPreviousTile] > earlyExitIterations) { break; } } // Queue next tile for(int i = 0; i < tileSize; ++i) { int it = t * tileSize + i; process<T><<<THREAD_BLOCK(N,128),0,strm>>>(d_data,d_res,it); } events2[t].record(strm); cpystrm.waitForEvent(events2[t]); int lastFromCurrentTile = t * tileSize + tileSize - 1; cudaMemcpyAsync(h_res.data()+lastFromCurrentTile,d_res.data().get()+lastFromCurrentTile,4,cudaMemcpyDeviceToHost,cpystrm); events[t].record(cpystrm); } }); test.addMeassurement("2. Async Streams", st.median); CUDA_SYNC_CHECK_ERROR(); } { thrust::fill(d_res.begin(),d_res.end(),0); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( testIts, [&]() { earlyExitDP<T><<<1,1>>>(d_data,d_res,maxIts,earlyExitIterations); }); test.addMeassurement("3. Dynamic Parallelism", st.median); CUDA_SYNC_CHECK_ERROR(); } cudaDeviceSynchronize(); cout << "Done." << endl; return 0; }
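The core of option 2 above is that the per-iteration residual is copied into pinned host memory on a separate copy stream, gated by an event recorded on the compute stream, and the host only synchronizes on an event from a couple of iterations back. A stripped-down, standalone sketch of that pattern with plain CUDA calls (illustrative names, not Saiga's wrappers) is:

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in for the real work: iteration i writes residual i.
__global__ void writeResidual(float *res, int it) { res[it] = (float)it; }

int main() {
    const int maxIts = 16, lag = 2;
    float *d_res, *h_res;
    cudaMalloc(&d_res, maxIts * sizeof(float));
    cudaMallocHost(&h_res, maxIts * sizeof(float));      // pinned, so the async copy can overlap

    cudaStream_t compute, copy;
    cudaStreamCreate(&compute);
    cudaStreamCreate(&copy);

    cudaEvent_t produced[maxIts], copied[maxIts];
    for (int i = 0; i < maxIts; ++i) {
        cudaEventCreateWithFlags(&produced[i], cudaEventDisableTiming);
        cudaEventCreateWithFlags(&copied[i], cudaEventDisableTiming);
    }

    int queued = maxIts;
    for (int i = 0; i < maxIts; ++i) {
        writeResidual<<<1, 1, 0, compute>>>(d_res, i);    // queue iteration i's "compute"
        cudaEventRecord(produced[i], compute);
        cudaStreamWaitEvent(copy, produced[i], 0);        // copy stream waits for that result
        cudaMemcpyAsync(&h_res[i], &d_res[i], sizeof(float),
                        cudaMemcpyDeviceToHost, copy);
        cudaEventRecord(copied[i], copy);

        if (i >= lag) {
            cudaEventSynchronize(copied[i - lag]);        // only wait on an older iteration
            if (h_res[i - lag] > 8.0f) { queued = i + 1; break; }   // exit a few iterations late
        }
    }
    cudaDeviceSynchronize();
    printf("queued %d iterations before stopping\n", queued);

    cudaStreamDestroy(compute);
    cudaStreamDestroy(copy);
    cudaFreeHost(h_res);
    cudaFree(d_res);
    return 0;
}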
424bf330916914833aee343e54ef79e6bf117bbc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <tune_quda.h> #include <quda_internal.h> #include <gauge_field_order.h> #include <quda_matrix.h> #include <color_spinor.h> #include <dslash_quda.h> namespace quda { #ifdef GPU_CLOVER_DIRAC namespace { // anonymous #include <texture.h> } template<int N> void createEventArray(hipEvent_t (&event)[N], unsigned int flags=hipEventDefault) { for(int i=0; i<N; ++i) hipEventCreate(&event[i],flags); return; } template<int N> void destroyEventArray(hipEvent_t (&event)[N]) { for(int i=0; i<N; ++i) hipEventDestroy(event[i]); } static hipEvent_t packEnd; static hipEvent_t gatherEnd[4]; static hipEvent_t scatterEnd[4]; static hipEvent_t oprodStart; static hipEvent_t oprodEnd; void createCloverForceEvents(){ hipEventCreate(&packEnd, hipEventDisableTiming); createEventArray(gatherEnd, hipEventDisableTiming); createEventArray(scatterEnd, hipEventDisableTiming); hipEventCreate(&oprodStart, hipEventDisableTiming); hipEventCreate(&oprodEnd, hipEventDisableTiming); return; } void destroyCloverForceEvents(){ destroyEventArray(gatherEnd); destroyEventArray(scatterEnd); hipEventDestroy(packEnd); hipEventDestroy(oprodStart); hipEventDestroy(oprodEnd); return; } enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL}; template<typename Float, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> struct CloverForceArg { unsigned int length; int X[4]; unsigned int parity; unsigned int dir; unsigned int ghostOffset[4]; unsigned int displacement; KernelType kernelType; bool partitioned[4]; InputA inA; InputB inB_shift; InputC inC; InputD inD_shift; Gauge gauge; Output force; Float coeff; CloverForceArg(const unsigned int parity, const unsigned int dir, const unsigned int *ghostOffset, const unsigned int displacement, const KernelType kernelType, const double coeff, InputA& inA, InputB& inB_shift, InputC& inC, InputD& inD_shift, Gauge& gauge, Output& force, GaugeField &meta) : length(meta.VolumeCB()), parity(parity), dir(5), displacement(displacement), kernelType(kernelType), coeff(coeff), inA(inA), inB_shift(inB_shift), inC(inC), inD_shift(inD_shift), gauge(gauge), force(force) { for(int i=0; i<4; ++i) this->X[i] = meta.X()[i]; for(int i=0; i<4; ++i) this->ghostOffset[i] = ghostOffset[i]; for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? 
true : false; } }; enum IndexType { EVEN_X = 0, EVEN_Y = 1, EVEN_Z = 2, EVEN_T = 3 }; template <IndexType idxType> static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4], const unsigned int cb_idx, const unsigned int parity, const int X[4]) { const int &LX = X[0]; const int &LY = X[1]; const int &LZ = X[2]; const int XYZ = X[2]*X[1]*X[0]; const int XY = X[1]*X[0]; idx = 2*cb_idx; int x, y, z, t; if (idxType == EVEN_X /*!(LX & 1)*/) { // X even // t = idx / XYZ; // z = (idx / XY) % Z; // y = (idx / X) % Y; // idx += (parity + t + z + y) & 1; // x = idx % X; // equivalent to the above, but with fewer divisions/mods: int aux1 = idx / LX; x = idx - aux1 * LX; int aux2 = aux1 / LY; y = aux1 - aux2 * LY; t = aux2 / LZ; z = aux2 - t * LZ; aux1 = (parity + t + z + y) & 1; x += aux1; idx += aux1; } else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even t = idx / XYZ; z = (idx / XY) % LZ; idx += (parity + t + z) & 1; y = (idx / LX) % LY; x = idx % LX; } else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even t = idx / XYZ; idx += (parity + t) & 1; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } else { idx += parity; t = idx / XYZ; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } c[0] = x; c[1] = y; c[2] = z; c[3] = t; } // Get the coordinates for the exterior kernels __device__ void coordsFromIndex(int x[4], const unsigned int cb_idx, const int X[4], const unsigned int dir, const int displacement, const unsigned int parity) { int Xh[2] = {X[0]/2, X[1]/2}; switch(dir){ case 0: x[2] = cb_idx/Xh[1] % X[2]; x[3] = cb_idx/(Xh[1]*X[2]) % X[3]; x[0] = cb_idx/(Xh[1]*X[2]*X[3]); x[0] += (X[0] - displacement); x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1); break; case 1: x[2] = cb_idx/Xh[0] % X[2]; x[3] = cb_idx/(Xh[0]*X[2]) % X[3]; x[1] = cb_idx/(Xh[0]*X[2]*X[3]); x[1] += (X[1] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 2: x[1] = cb_idx/Xh[0] % X[1]; x[3] = cb_idx/(Xh[0]*X[1]) % X[3]; x[2] = cb_idx/(Xh[0]*X[1]*X[3]); x[2] += (X[2] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 3: x[1] = cb_idx/Xh[0] % X[1]; x[2] = cb_idx/(Xh[0]*X[1]) % X[2]; x[3] = cb_idx/(Xh[0]*X[1]*X[2]); x[3] += (X[3] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; } return; } __device__ __forceinline__ int neighborIndex(const unsigned int& cb_idx, const int shift[4], const bool partitioned[4], const unsigned int& parity, const int X[4]){ int full_idx; int x[4]; coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X); for(int dim = 0; dim<4; ++dim){ if(partitioned[dim]) if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1; } for(int dim=0; dim<4; ++dim){ x[dim] = shift[dim] ? 
(x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim]; } return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1; } template<typename real, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> __global__ void interiorOprodKernel(CloverForceArg<real, Output, Gauge, InputA, InputB, InputC, InputD> arg) { typedef complex<real> Complex; int idx = blockIdx.x*blockDim.x + threadIdx.x; ColorSpinor<real,3,4> A, B_shift, C, D_shift; Matrix<Complex,3> U, result, temp; while (idx<arg.length){ arg.inA.load(static_cast<Complex*>(A.data), idx); arg.inC.load(static_cast<Complex*>(C.data), idx); for(int dim=0; dim<4; ++dim){ int shift[4] = {0,0,0,0}; shift[dim] = 1; const int nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(nbr_idx >= 0){ arg.inB_shift.load(static_cast<Complex*>(B_shift.data), nbr_idx); arg.inD_shift.load(static_cast<Complex*>(D_shift.data), nbr_idx); B_shift = (B_shift.project(dim,1)).reconstruct(dim,1); result = outerProdSpinTrace(B_shift,A); D_shift = (D_shift.project(dim,-1)).reconstruct(dim,-1); result += outerProdSpinTrace(D_shift,C); arg.force.load(reinterpret_cast<real*>(temp.data), idx, dim, arg.parity); arg.gauge.load(reinterpret_cast<real*>(U.data), idx, dim, arg.parity); result = temp + U*result*arg.coeff; arg.force.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); } } // dir idx += gridDim.x*blockDim.x; } return; } // interiorOprodKernel template<int dim, typename real, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> __global__ void exteriorOprodKernel(CloverForceArg<real, Output, Gauge, InputA, InputB, InputC, InputD> arg) { typedef complex<real> Complex; int cb_idx = blockIdx.x*blockDim.x + threadIdx.x; ColorSpinor<real,3,4> A, B_shift, C, D_shift; ColorSpinor<real,3,2> projected_tmp; Matrix<Complex,3> U, result, temp; int x[4]; while (cb_idx<arg.length){ coordsFromIndex(x, cb_idx, arg.X, dim, arg.displacement, arg.parity); const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1); arg.inA.load(static_cast<Complex*>(A.data), bulk_cb_idx); arg.inC.load(static_cast<Complex*>(C.data), bulk_cb_idx); const unsigned int ghost_idx = arg.ghostOffset[dim] + cb_idx; arg.inB_shift.loadGhost(static_cast<Complex*>(projected_tmp.data), ghost_idx, dim); B_shift = projected_tmp.reconstruct(dim, 1); result = outerProdSpinTrace(B_shift,A); arg.inD_shift.loadGhost(static_cast<Complex*>(projected_tmp.data), ghost_idx, dim); D_shift = projected_tmp.reconstruct(dim,-1); result += outerProdSpinTrace(D_shift,C); arg.force.load(reinterpret_cast<real*>(temp.data), bulk_cb_idx, dim, arg.parity); arg.gauge.load(reinterpret_cast<real*>(U.data), bulk_cb_idx, dim, arg.parity); result = temp + U*result*arg.coeff; arg.force.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, dim, arg.parity); cb_idx += gridDim.x*blockDim.x; } return; } // exteriorOprodKernel template<typename Float, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> class CloverForce : public Tunable { private: CloverForceArg<Float,Output,Gauge,InputA,InputB,InputC,InputD> &arg; const GaugeField &meta; QudaFieldLocation location; // location of the lattice fields unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.length; } bool tuneGridDim() const { return false; } public: 
CloverForce(CloverForceArg<Float,Output,Gauge,InputA,InputB,InputC,InputD> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("prec=%lu,stride=%d", sizeof(Float), arg.inA.Stride()); // this sets the communications pattern for the packing kernel int comms[QUDA_MAX_DIM] = { commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3) }; setPackComms(comms); } virtual ~CloverForce() {} void apply(const hipStream_t &stream){ if(location == QUDA_CUDA_FIELD_LOCATION){ // Disable tuning for the time being TuneParam tp = tuneLaunch(*this,getTuning(),getVerbosity()); if(arg.kernelType == OPROD_INTERIOR_KERNEL){ hipLaunchKernelGGL(( interiorOprodKernel), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); } else if(arg.kernelType == OPROD_EXTERIOR_KERNEL) { if (arg.dir == 0)hipLaunchKernelGGL(( exteriorOprodKernel<0>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); if (arg.dir == 1)hipLaunchKernelGGL(( exteriorOprodKernel<1>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); if (arg.dir == 2)hipLaunchKernelGGL(( exteriorOprodKernel<2>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); if (arg.dir == 3)hipLaunchKernelGGL(( exteriorOprodKernel<3>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); } else { errorQuda("Kernel type not supported\n"); } }else{ // run the CPU code errorQuda("No CPU support for staggered outer-product calculation\n"); } } // apply void preTune(){ this->arg.force.save(); } void postTune(){ this->arg.force.load(); } long long flops() const { if (arg.kernelType == OPROD_INTERIOR_KERNEL) { return ((long long)arg.length)*4*(24 + 144 + 234); // spin project + spin trace + multiply-add } else { return ((long long)arg.length)*(144 + 234); // spin trace + multiply-add } } long long bytes() const { if (arg.kernelType == OPROD_INTERIOR_KERNEL) { return ((long long)arg.length)*(arg.inA.Bytes() + arg.inC.Bytes() + 4*(arg.inB_shift.Bytes() + arg.inD_shift.Bytes() + 2*arg.force.Bytes() + arg.gauge.Bytes())); } else { return ((long long)arg.length)*(arg.inA.Bytes() + arg.inB_shift.Bytes()/2 + arg.inC.Bytes() + arg.inD_shift.Bytes()/2 + 2*arg.force.Bytes() + arg.gauge.Bytes()); } } TuneKey tuneKey() const { char new_aux[TuneKey::aux_n]; strcpy(new_aux, aux); if (arg.kernelType == OPROD_INTERIOR_KERNEL) { strcat(new_aux, ",interior"); } else { strcat(new_aux, ",exterior"); if (arg.dir==0) strcat(new_aux, ",dir=0"); else if (arg.dir==1) strcat(new_aux, ",dir=1"); else if (arg.dir==2) strcat(new_aux, ",dir=2"); else if (arg.dir==3) strcat(new_aux, ",dir=3"); } return TuneKey(meta.VolString(), typeid(*this).name(), new_aux); } }; // CloverForce void exchangeGhost(cudaColorSpinorField &a, int parity, int dag) { // need to enable packing in temporal direction to get spin-projector correct bool pack_old = getKernelPackT(); setKernelPackT(true); // first transfer src1 hipDeviceSynchronize(); a.pack(1, 1-parity, dag, Nstream-1, 0); hipDeviceSynchronize(); for(int i=3; i>=0; i--){ if(commDimPartitioned(i)){ // Initialize the host transfer from the source spinor a.gather(1, dag, 2*i); } // commDim(i) } // i=3,..,0 hipDeviceSynchronize(); for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsStart(1, 2*i, dag); } } for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsWait(1, 2*i, dag); a.scatter(1, dag, 2*i); } } hipDeviceSynchronize(); setKernelPackT(pack_old); // restore packing state } template<typename Float, typename Output, typename 
Gauge, typename InputA, typename InputB, typename InputC, typename InputD> void computeCloverForceCuda(Output force, Gauge gauge, cudaGaugeField& out, InputA& inA, InputB& inB, InputC &inC, InputD &inD, cudaColorSpinorField& src1, cudaColorSpinorField& src2, const unsigned int parity, const int faceVolumeCB[4], const double coeff) { hipEventRecord(oprodStart, streams[Nstream-1]); unsigned int ghostOffset[4] = {0,0,0,0}; for (int dir=0; dir<4; ++dir) { if (src1.GhostOffset(dir) != src2.GhostOffset(dir)) errorQuda("Mismatched ghost offset[%d] %d != %d", dir, src1.GhostOffset(dir), src2.GhostOffset(dir)); ghostOffset[dir] = (src1.GhostOffset(dir,1))/src1.FieldOrder(); // offset we want is the forwards one } // Create the arguments for the interior kernel CloverForceArg<Float,Output,Gauge,InputA,InputB,InputC,InputD> arg(parity, 0, ghostOffset, 1, OPROD_INTERIOR_KERNEL, coeff, inA, inB, inC, inD, gauge, force, out); CloverForce<Float,Output,Gauge,InputA,InputB,InputC,InputD> oprod(arg, out, QUDA_CUDA_FIELD_LOCATION); int dag = 1; exchangeGhost(src1, parity, dag); exchangeGhost(src2, parity, 1-dag); arg.kernelType = OPROD_INTERIOR_KERNEL; arg.length = src1.VolumeCB(); oprod.apply(0); for (int i=3; i>=0; i--) { if (commDimPartitioned(i)) { // update parameters for this exterior kernel arg.kernelType = OPROD_EXTERIOR_KERNEL; arg.dir = i; arg.length = faceVolumeCB[i]; arg.displacement = 1; // forwards displacement oprod.apply(0); } } // i=3,..,0 } // computeCloverForceCuda #endif // GPU_CLOVER_FORCE void computeCloverForce(cudaGaugeField& force, const cudaGaugeField& U, cudaColorSpinorField& x, cudaColorSpinorField& p, const double coeff) { #ifdef GPU_CLOVER_DIRAC static_cast<cudaColorSpinorField&>(x.Even()).allocateGhostBuffer(1); static_cast<cudaColorSpinorField&>(x.Odd()).allocateGhostBuffer(1); static_cast<cudaColorSpinorField&>(p.Even()).allocateGhostBuffer(1); static_cast<cudaColorSpinorField&>(p.Odd()).allocateGhostBuffer(1); if(force.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", force.Order()); if(x.Precision() != force.Precision()) errorQuda("Mixed precision not supported: %d %d\n", x.Precision(), force.Precision()); createCloverForceEvents(); // FIXME not actually used for (int parity=0; parity<2; parity++) { ColorSpinorField& inA = (parity&1) ? p.Odd() : p.Even(); ColorSpinorField& inB = (parity&1) ? x.Even(): x.Odd(); ColorSpinorField& inC = (parity&1) ? x.Odd() : x.Even(); ColorSpinorField& inD = (parity&1) ? 
p.Even(): p.Odd(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { Spinor<double2, double2, 12, 0, 0> spinorA(inA); Spinor<double2, double2, 12, 0, 1> spinorB(inB); Spinor<double2, double2, 12, 0, 2> spinorC(inC); Spinor<double2, double2, 12, 0, 3> spinorD(inD); if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) { computeCloverForceCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(force), gauge::FloatNOrder<double,18, 2, 18>(U), force, spinorA, spinorB, spinorC, spinorD, static_cast<cudaColorSpinorField&>(inB), static_cast<cudaColorSpinorField&>(inD), parity, inB.GhostFace(), coeff); } else if (U.Reconstruct() == QUDA_RECONSTRUCT_12) { computeCloverForceCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(force), gauge::FloatNOrder<double,18, 2, 12>(U), force, spinorA, spinorB, spinorC, spinorD, static_cast<cudaColorSpinorField&>(inB), static_cast<cudaColorSpinorField&>(inD), parity, inB.GhostFace(), coeff); } else { errorQuda("Unsupported recontruction type"); } } else if (x.Precision() == QUDA_SINGLE_PRECISION) { #if 0 // FIXME - Spinor class expect float4 pointer not Complex pointer Spinor<float4, float4, float4, 6, 0, 0> spinorA(inA); Spinor<float4, float4, float4, 6, 0, 1> spinorB(inB); Spinor<float4, float4, float4, 6, 0, 2> spinorC(inC); Spinor<float4, float4, float4, 6, 0, 3> spinorD(inD); if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) { computeCloverForceCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(force), gauge::FloatNOrder<float, 18, 2, 18>(U), force, spinorA, spinorB, spinorC, spinorD, static_cast<cudaColorSpinorField&>(inB), static_cast<cudaColorSpinorField&>(inD), parity, inB.GhostFace(), coeff); } else if (U.Reconstruct() == QUDA_RECONSTRUCT_12) { computeCloverForceCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(force), gauge::FloatNOrder<float, 18, 4, 12>(U), force, spinorA, spinorB, spinorC, spinorD, static_cast<cudaColorSpinorField&>(inB), static_cast<cudaColorSpinorField&>(inD), parity, inB.GhostFace(), coeff); } #endif } else { errorQuda("Unsupported precision: %d\n", x.Precision()); } } destroyCloverForceEvents(); // not actually used #else // GPU_CLOVER_DIRAC not defined errorQuda("Clover Dirac operator has not been built!"); #endif checkCudaError(); return; } // computeCloverForce } // namespace quda
424bf330916914833aee343e54ef79e6bf117bbc.cu
#include <cstdio> #include <cstdlib> #include <tune_quda.h> #include <quda_internal.h> #include <gauge_field_order.h> #include <quda_matrix.h> #include <color_spinor.h> #include <dslash_quda.h> namespace quda { #ifdef GPU_CLOVER_DIRAC namespace { // anonymous #include <texture.h> } template<int N> void createEventArray(cudaEvent_t (&event)[N], unsigned int flags=cudaEventDefault) { for(int i=0; i<N; ++i) cudaEventCreate(&event[i],flags); return; } template<int N> void destroyEventArray(cudaEvent_t (&event)[N]) { for(int i=0; i<N; ++i) cudaEventDestroy(event[i]); } static cudaEvent_t packEnd; static cudaEvent_t gatherEnd[4]; static cudaEvent_t scatterEnd[4]; static cudaEvent_t oprodStart; static cudaEvent_t oprodEnd; void createCloverForceEvents(){ cudaEventCreate(&packEnd, cudaEventDisableTiming); createEventArray(gatherEnd, cudaEventDisableTiming); createEventArray(scatterEnd, cudaEventDisableTiming); cudaEventCreate(&oprodStart, cudaEventDisableTiming); cudaEventCreate(&oprodEnd, cudaEventDisableTiming); return; } void destroyCloverForceEvents(){ destroyEventArray(gatherEnd); destroyEventArray(scatterEnd); cudaEventDestroy(packEnd); cudaEventDestroy(oprodStart); cudaEventDestroy(oprodEnd); return; } enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL}; template<typename Float, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> struct CloverForceArg { unsigned int length; int X[4]; unsigned int parity; unsigned int dir; unsigned int ghostOffset[4]; unsigned int displacement; KernelType kernelType; bool partitioned[4]; InputA inA; InputB inB_shift; InputC inC; InputD inD_shift; Gauge gauge; Output force; Float coeff; CloverForceArg(const unsigned int parity, const unsigned int dir, const unsigned int *ghostOffset, const unsigned int displacement, const KernelType kernelType, const double coeff, InputA& inA, InputB& inB_shift, InputC& inC, InputD& inD_shift, Gauge& gauge, Output& force, GaugeField &meta) : length(meta.VolumeCB()), parity(parity), dir(5), displacement(displacement), kernelType(kernelType), coeff(coeff), inA(inA), inB_shift(inB_shift), inC(inC), inD_shift(inD_shift), gauge(gauge), force(force) { for(int i=0; i<4; ++i) this->X[i] = meta.X()[i]; for(int i=0; i<4; ++i) this->ghostOffset[i] = ghostOffset[i]; for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? 
true : false; } }; enum IndexType { EVEN_X = 0, EVEN_Y = 1, EVEN_Z = 2, EVEN_T = 3 }; template <IndexType idxType> static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4], const unsigned int cb_idx, const unsigned int parity, const int X[4]) { const int &LX = X[0]; const int &LY = X[1]; const int &LZ = X[2]; const int XYZ = X[2]*X[1]*X[0]; const int XY = X[1]*X[0]; idx = 2*cb_idx; int x, y, z, t; if (idxType == EVEN_X /*!(LX & 1)*/) { // X even // t = idx / XYZ; // z = (idx / XY) % Z; // y = (idx / X) % Y; // idx += (parity + t + z + y) & 1; // x = idx % X; // equivalent to the above, but with fewer divisions/mods: int aux1 = idx / LX; x = idx - aux1 * LX; int aux2 = aux1 / LY; y = aux1 - aux2 * LY; t = aux2 / LZ; z = aux2 - t * LZ; aux1 = (parity + t + z + y) & 1; x += aux1; idx += aux1; } else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even t = idx / XYZ; z = (idx / XY) % LZ; idx += (parity + t + z) & 1; y = (idx / LX) % LY; x = idx % LX; } else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even t = idx / XYZ; idx += (parity + t) & 1; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } else { idx += parity; t = idx / XYZ; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } c[0] = x; c[1] = y; c[2] = z; c[3] = t; } // Get the coordinates for the exterior kernels __device__ void coordsFromIndex(int x[4], const unsigned int cb_idx, const int X[4], const unsigned int dir, const int displacement, const unsigned int parity) { int Xh[2] = {X[0]/2, X[1]/2}; switch(dir){ case 0: x[2] = cb_idx/Xh[1] % X[2]; x[3] = cb_idx/(Xh[1]*X[2]) % X[3]; x[0] = cb_idx/(Xh[1]*X[2]*X[3]); x[0] += (X[0] - displacement); x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1); break; case 1: x[2] = cb_idx/Xh[0] % X[2]; x[3] = cb_idx/(Xh[0]*X[2]) % X[3]; x[1] = cb_idx/(Xh[0]*X[2]*X[3]); x[1] += (X[1] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 2: x[1] = cb_idx/Xh[0] % X[1]; x[3] = cb_idx/(Xh[0]*X[1]) % X[3]; x[2] = cb_idx/(Xh[0]*X[1]*X[3]); x[2] += (X[2] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 3: x[1] = cb_idx/Xh[0] % X[1]; x[2] = cb_idx/(Xh[0]*X[1]) % X[2]; x[3] = cb_idx/(Xh[0]*X[1]*X[2]); x[3] += (X[3] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; } return; } __device__ __forceinline__ int neighborIndex(const unsigned int& cb_idx, const int shift[4], const bool partitioned[4], const unsigned int& parity, const int X[4]){ int full_idx; int x[4]; coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X); for(int dim = 0; dim<4; ++dim){ if(partitioned[dim]) if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1; } for(int dim=0; dim<4; ++dim){ x[dim] = shift[dim] ? 
(x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim]; } return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1; } template<typename real, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> __global__ void interiorOprodKernel(CloverForceArg<real, Output, Gauge, InputA, InputB, InputC, InputD> arg) { typedef complex<real> Complex; int idx = blockIdx.x*blockDim.x + threadIdx.x; ColorSpinor<real,3,4> A, B_shift, C, D_shift; Matrix<Complex,3> U, result, temp; while (idx<arg.length){ arg.inA.load(static_cast<Complex*>(A.data), idx); arg.inC.load(static_cast<Complex*>(C.data), idx); for(int dim=0; dim<4; ++dim){ int shift[4] = {0,0,0,0}; shift[dim] = 1; const int nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(nbr_idx >= 0){ arg.inB_shift.load(static_cast<Complex*>(B_shift.data), nbr_idx); arg.inD_shift.load(static_cast<Complex*>(D_shift.data), nbr_idx); B_shift = (B_shift.project(dim,1)).reconstruct(dim,1); result = outerProdSpinTrace(B_shift,A); D_shift = (D_shift.project(dim,-1)).reconstruct(dim,-1); result += outerProdSpinTrace(D_shift,C); arg.force.load(reinterpret_cast<real*>(temp.data), idx, dim, arg.parity); arg.gauge.load(reinterpret_cast<real*>(U.data), idx, dim, arg.parity); result = temp + U*result*arg.coeff; arg.force.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); } } // dir idx += gridDim.x*blockDim.x; } return; } // interiorOprodKernel template<int dim, typename real, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> __global__ void exteriorOprodKernel(CloverForceArg<real, Output, Gauge, InputA, InputB, InputC, InputD> arg) { typedef complex<real> Complex; int cb_idx = blockIdx.x*blockDim.x + threadIdx.x; ColorSpinor<real,3,4> A, B_shift, C, D_shift; ColorSpinor<real,3,2> projected_tmp; Matrix<Complex,3> U, result, temp; int x[4]; while (cb_idx<arg.length){ coordsFromIndex(x, cb_idx, arg.X, dim, arg.displacement, arg.parity); const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1); arg.inA.load(static_cast<Complex*>(A.data), bulk_cb_idx); arg.inC.load(static_cast<Complex*>(C.data), bulk_cb_idx); const unsigned int ghost_idx = arg.ghostOffset[dim] + cb_idx; arg.inB_shift.loadGhost(static_cast<Complex*>(projected_tmp.data), ghost_idx, dim); B_shift = projected_tmp.reconstruct(dim, 1); result = outerProdSpinTrace(B_shift,A); arg.inD_shift.loadGhost(static_cast<Complex*>(projected_tmp.data), ghost_idx, dim); D_shift = projected_tmp.reconstruct(dim,-1); result += outerProdSpinTrace(D_shift,C); arg.force.load(reinterpret_cast<real*>(temp.data), bulk_cb_idx, dim, arg.parity); arg.gauge.load(reinterpret_cast<real*>(U.data), bulk_cb_idx, dim, arg.parity); result = temp + U*result*arg.coeff; arg.force.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, dim, arg.parity); cb_idx += gridDim.x*blockDim.x; } return; } // exteriorOprodKernel template<typename Float, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> class CloverForce : public Tunable { private: CloverForceArg<Float,Output,Gauge,InputA,InputB,InputC,InputD> &arg; const GaugeField &meta; QudaFieldLocation location; // location of the lattice fields unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.length; } bool tuneGridDim() const { return false; } public: 
CloverForce(CloverForceArg<Float,Output,Gauge,InputA,InputB,InputC,InputD> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("prec=%lu,stride=%d", sizeof(Float), arg.inA.Stride()); // this sets the communications pattern for the packing kernel int comms[QUDA_MAX_DIM] = { commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3) }; setPackComms(comms); } virtual ~CloverForce() {} void apply(const cudaStream_t &stream){ if(location == QUDA_CUDA_FIELD_LOCATION){ // Disable tuning for the time being TuneParam tp = tuneLaunch(*this,getTuning(),getVerbosity()); if(arg.kernelType == OPROD_INTERIOR_KERNEL){ interiorOprodKernel<<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); } else if(arg.kernelType == OPROD_EXTERIOR_KERNEL) { if (arg.dir == 0) exteriorOprodKernel<0><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); if (arg.dir == 1) exteriorOprodKernel<1><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); if (arg.dir == 2) exteriorOprodKernel<2><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); if (arg.dir == 3) exteriorOprodKernel<3><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); } else { errorQuda("Kernel type not supported\n"); } }else{ // run the CPU code errorQuda("No CPU support for staggered outer-product calculation\n"); } } // apply void preTune(){ this->arg.force.save(); } void postTune(){ this->arg.force.load(); } long long flops() const { if (arg.kernelType == OPROD_INTERIOR_KERNEL) { return ((long long)arg.length)*4*(24 + 144 + 234); // spin project + spin trace + multiply-add } else { return ((long long)arg.length)*(144 + 234); // spin trace + multiply-add } } long long bytes() const { if (arg.kernelType == OPROD_INTERIOR_KERNEL) { return ((long long)arg.length)*(arg.inA.Bytes() + arg.inC.Bytes() + 4*(arg.inB_shift.Bytes() + arg.inD_shift.Bytes() + 2*arg.force.Bytes() + arg.gauge.Bytes())); } else { return ((long long)arg.length)*(arg.inA.Bytes() + arg.inB_shift.Bytes()/2 + arg.inC.Bytes() + arg.inD_shift.Bytes()/2 + 2*arg.force.Bytes() + arg.gauge.Bytes()); } } TuneKey tuneKey() const { char new_aux[TuneKey::aux_n]; strcpy(new_aux, aux); if (arg.kernelType == OPROD_INTERIOR_KERNEL) { strcat(new_aux, ",interior"); } else { strcat(new_aux, ",exterior"); if (arg.dir==0) strcat(new_aux, ",dir=0"); else if (arg.dir==1) strcat(new_aux, ",dir=1"); else if (arg.dir==2) strcat(new_aux, ",dir=2"); else if (arg.dir==3) strcat(new_aux, ",dir=3"); } return TuneKey(meta.VolString(), typeid(*this).name(), new_aux); } }; // CloverForce void exchangeGhost(cudaColorSpinorField &a, int parity, int dag) { // need to enable packing in temporal direction to get spin-projector correct bool pack_old = getKernelPackT(); setKernelPackT(true); // first transfer src1 cudaDeviceSynchronize(); a.pack(1, 1-parity, dag, Nstream-1, 0); cudaDeviceSynchronize(); for(int i=3; i>=0; i--){ if(commDimPartitioned(i)){ // Initialize the host transfer from the source spinor a.gather(1, dag, 2*i); } // commDim(i) } // i=3,..,0 cudaDeviceSynchronize(); for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsStart(1, 2*i, dag); } } for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsWait(1, 2*i, dag); a.scatter(1, dag, 2*i); } } cudaDeviceSynchronize(); setKernelPackT(pack_old); // restore packing state } template<typename Float, typename Output, typename Gauge, typename InputA, typename InputB, typename InputC, typename InputD> void computeCloverForceCuda(Output force, Gauge gauge, cudaGaugeField& 
out, InputA& inA, InputB& inB, InputC &inC, InputD &inD, cudaColorSpinorField& src1, cudaColorSpinorField& src2, const unsigned int parity, const int faceVolumeCB[4], const double coeff) { cudaEventRecord(oprodStart, streams[Nstream-1]); unsigned int ghostOffset[4] = {0,0,0,0}; for (int dir=0; dir<4; ++dir) { if (src1.GhostOffset(dir) != src2.GhostOffset(dir)) errorQuda("Mismatched ghost offset[%d] %d != %d", dir, src1.GhostOffset(dir), src2.GhostOffset(dir)); ghostOffset[dir] = (src1.GhostOffset(dir,1))/src1.FieldOrder(); // offset we want is the forwards one } // Create the arguments for the interior kernel CloverForceArg<Float,Output,Gauge,InputA,InputB,InputC,InputD> arg(parity, 0, ghostOffset, 1, OPROD_INTERIOR_KERNEL, coeff, inA, inB, inC, inD, gauge, force, out); CloverForce<Float,Output,Gauge,InputA,InputB,InputC,InputD> oprod(arg, out, QUDA_CUDA_FIELD_LOCATION); int dag = 1; exchangeGhost(src1, parity, dag); exchangeGhost(src2, parity, 1-dag); arg.kernelType = OPROD_INTERIOR_KERNEL; arg.length = src1.VolumeCB(); oprod.apply(0); for (int i=3; i>=0; i--) { if (commDimPartitioned(i)) { // update parameters for this exterior kernel arg.kernelType = OPROD_EXTERIOR_KERNEL; arg.dir = i; arg.length = faceVolumeCB[i]; arg.displacement = 1; // forwards displacement oprod.apply(0); } } // i=3,..,0 } // computeCloverForceCuda #endif // GPU_CLOVER_FORCE void computeCloverForce(cudaGaugeField& force, const cudaGaugeField& U, cudaColorSpinorField& x, cudaColorSpinorField& p, const double coeff) { #ifdef GPU_CLOVER_DIRAC static_cast<cudaColorSpinorField&>(x.Even()).allocateGhostBuffer(1); static_cast<cudaColorSpinorField&>(x.Odd()).allocateGhostBuffer(1); static_cast<cudaColorSpinorField&>(p.Even()).allocateGhostBuffer(1); static_cast<cudaColorSpinorField&>(p.Odd()).allocateGhostBuffer(1); if(force.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", force.Order()); if(x.Precision() != force.Precision()) errorQuda("Mixed precision not supported: %d %d\n", x.Precision(), force.Precision()); createCloverForceEvents(); // FIXME not actually used for (int parity=0; parity<2; parity++) { ColorSpinorField& inA = (parity&1) ? p.Odd() : p.Even(); ColorSpinorField& inB = (parity&1) ? x.Even(): x.Odd(); ColorSpinorField& inC = (parity&1) ? x.Odd() : x.Even(); ColorSpinorField& inD = (parity&1) ? 
p.Even(): p.Odd(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { Spinor<double2, double2, 12, 0, 0> spinorA(inA); Spinor<double2, double2, 12, 0, 1> spinorB(inB); Spinor<double2, double2, 12, 0, 2> spinorC(inC); Spinor<double2, double2, 12, 0, 3> spinorD(inD); if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) { computeCloverForceCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(force), gauge::FloatNOrder<double,18, 2, 18>(U), force, spinorA, spinorB, spinorC, spinorD, static_cast<cudaColorSpinorField&>(inB), static_cast<cudaColorSpinorField&>(inD), parity, inB.GhostFace(), coeff); } else if (U.Reconstruct() == QUDA_RECONSTRUCT_12) { computeCloverForceCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(force), gauge::FloatNOrder<double,18, 2, 12>(U), force, spinorA, spinorB, spinorC, spinorD, static_cast<cudaColorSpinorField&>(inB), static_cast<cudaColorSpinorField&>(inD), parity, inB.GhostFace(), coeff); } else { errorQuda("Unsupported recontruction type"); } } else if (x.Precision() == QUDA_SINGLE_PRECISION) { #if 0 // FIXME - Spinor class expect float4 pointer not Complex pointer Spinor<float4, float4, float4, 6, 0, 0> spinorA(inA); Spinor<float4, float4, float4, 6, 0, 1> spinorB(inB); Spinor<float4, float4, float4, 6, 0, 2> spinorC(inC); Spinor<float4, float4, float4, 6, 0, 3> spinorD(inD); if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) { computeCloverForceCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(force), gauge::FloatNOrder<float, 18, 2, 18>(U), force, spinorA, spinorB, spinorC, spinorD, static_cast<cudaColorSpinorField&>(inB), static_cast<cudaColorSpinorField&>(inD), parity, inB.GhostFace(), coeff); } else if (U.Reconstruct() == QUDA_RECONSTRUCT_12) { computeCloverForceCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(force), gauge::FloatNOrder<float, 18, 4, 12>(U), force, spinorA, spinorB, spinorC, spinorD, static_cast<cudaColorSpinorField&>(inB), static_cast<cudaColorSpinorField&>(inD), parity, inB.GhostFace(), coeff); } #endif } else { errorQuda("Unsupported precision: %d\n", x.Precision()); } } destroyCloverForceEvents(); // not actually used #else // GPU_CLOVER_DIRAC not defined errorQuda("Clover Dirac operator has not been built!"); #endif checkCudaError(); return; } // computeCloverForce } // namespace quda
ff5a07fb83a42a44951bc1e448d87026d7ca18b1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Matrix Vector multiplication with flipped source and destination pointer
on the copy back from device to host (see the hipMemcpy after the kernel launch).
*/

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

//Grid dimension
#define B 100
//Block dimension
#define T 256
//Array size
#define C B*T

// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call)                                                                 \
do{                                                                                          \
    hipError_t cuErr = call;                                                                 \
    if(hipSuccess != cuErr){                                                                 \
        printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErr));  \
        exit(0);                                                                             \
    }                                                                                        \
}while(0)

//Host pointer for matrix b, input vector a and result vector c
int *a;
int *b;
int *c;

//Device pointer for matrix d_b, input vector d_a and result vector d_c
int *d_a;
int *d_b;
int *d_c;

//Initialization and allocation of the host variables
int init(){
    //Allocating host variables
    a = (int *) malloc(C*sizeof(int));
    b = (int *) malloc(C*C*sizeof(int));
    c = (int *) malloc(C*sizeof(int));

    //Initialize host values
    for(int i=0; i<C; i++){
        for(int j=0; j<C; j++){
            b[j+i*C]=1;
        }
        a[i]=1;
        c[i]=0;
    }
    return 0;
}

//Kernel
__global__ void Mult(int* d_a, int* d_b, int* d_c){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    for(int j=0; j<C; j++){
        d_c[tid]+=d_b[j+tid*C]*d_a[j];
    }
}

//Checking if the values stored in c are correct
int check(){
    bool test = false;
    for(int i=0; i<C; i++){
        if(c[i]!=C){
            test = true;
        }
    }
    printf("Memory Access Issue visible: %s\n",test ? "true\n" : "false\n");
    return 0;
}

//Initialization of the variables on the GPU
int initcuda(){
    //Allocation of GPU memory for d_a,d_b,d_c
    cudaErrorCheck( hipMalloc(&d_a, C*sizeof(int)));
    cudaErrorCheck( hipMalloc(&d_b, C*C*sizeof(int)));
    cudaErrorCheck( hipMalloc(&d_c, C*sizeof(int)));

    //Copying the array a and the matrix b from the host to the array d_a and the matrix d_b on the device
    cudaErrorCheck( hipMemcpy(d_a,a,C*sizeof(int),hipMemcpyHostToDevice));
    cudaErrorCheck( hipMemcpy(d_b,b,C*C*sizeof(int),hipMemcpyHostToDevice));
    return 0;
}

//Main program
int main(){
    //Calling the initialization methods
    init();
    initcuda();

    //Launch Kernel
    hipLaunchKernelGGL(( Mult), dim3(B),dim3(T), 0, 0, d_a,d_b,d_c);

    // Check for errors in kernel launch (e.g. invalid execution configuration parameters)
    cudaErrorCheck( hipGetLastError());

    // Check for errors on the GPU after control is returned to CPU
    cudaErrorCheck( hipDeviceSynchronize());

    //Copying back the result d_c from the device to the host array c
    //Flipped source and destination pointer
    cudaErrorCheck( hipMemcpy(d_c,c,C*sizeof(int),hipMemcpyDeviceToHost));

    //Verify result
    check();

    //Freeing GPU memory
    cudaErrorCheck( hipFree(d_a));
    cudaErrorCheck( hipFree(d_b));
    cudaErrorCheck( hipFree(d_c));

    //Freeing CPU memory
    free(a);
    free(b);
    free(c);
    return 0;
}
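For contrast, a minimal hypothetical fix (not part of the original demo, which deliberately keeps the bug): in the final hipMemcpy the host array c must be the destination and the device array d_c the source.

    //Corrected copy back: host array c receives the device result d_c
    cudaErrorCheck( hipMemcpy(c,d_c,C*sizeof(int),hipMemcpyDeviceToHost));

With this ordering every element of c equals C, so check() should print "Memory Access Issue visible: false".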
ff5a07fb83a42a44951bc1e448d87026d7ca18b1.cu
/*
Matrix Vector multiplication with flipped source and destination pointer
on the copy back from device to host (see the cudaMemcpy after the kernel launch).
*/

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

//Grid dimension
#define B 100
//Block dimension
#define T 256
//Array size
#define C B*T

// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call)                                                                  \
do{                                                                                           \
    cudaError_t cuErr = call;                                                                 \
    if(cudaSuccess != cuErr){                                                                 \
        printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));  \
        exit(0);                                                                              \
    }                                                                                         \
}while(0)

//Host pointer for matrix b, input vector a and result vector c
int *a;
int *b;
int *c;

//Device pointer for matrix d_b, input vector d_a and result vector d_c
int *d_a;
int *d_b;
int *d_c;

//Initialization and allocation of the host variables
int init(){
    //Allocating host variables
    a = (int *) malloc(C*sizeof(int));
    b = (int *) malloc(C*C*sizeof(int));
    c = (int *) malloc(C*sizeof(int));

    //Initialize host values
    for(int i=0; i<C; i++){
        for(int j=0; j<C; j++){
            b[j+i*C]=1;
        }
        a[i]=1;
        c[i]=0;
    }
    return 0;
}

//Kernel
__global__ void Mult(int* d_a, int* d_b, int* d_c){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    for(int j=0; j<C; j++){
        d_c[tid]+=d_b[j+tid*C]*d_a[j];
    }
}

//Checking if the values stored in c are correct
int check(){
    bool test = false;
    for(int i=0; i<C; i++){
        if(c[i]!=C){
            test = true;
        }
    }
    printf("Memory Access Issue visible: %s\n",test ? "true\n" : "false\n");
    return 0;
}

//Initialization of the variables on the GPU
int initcuda(){
    //Allocation of GPU memory for d_a,d_b,d_c
    cudaErrorCheck( cudaMalloc(&d_a, C*sizeof(int)));
    cudaErrorCheck( cudaMalloc(&d_b, C*C*sizeof(int)));
    cudaErrorCheck( cudaMalloc(&d_c, C*sizeof(int)));

    //Copying the array a and the matrix b from the host to the array d_a and the matrix d_b on the device
    cudaErrorCheck( cudaMemcpy(d_a,a,C*sizeof(int),cudaMemcpyHostToDevice));
    cudaErrorCheck( cudaMemcpy(d_b,b,C*C*sizeof(int),cudaMemcpyHostToDevice));
    return 0;
}

//Main program
int main(){
    //Calling the initialization methods
    init();
    initcuda();

    //Launch Kernel
    Mult<<<B,T>>>(d_a,d_b,d_c);

    // Check for errors in kernel launch (e.g. invalid execution configuration parameters)
    cudaErrorCheck( cudaGetLastError());

    // Check for errors on the GPU after control is returned to CPU
    cudaErrorCheck( cudaDeviceSynchronize());

    //Copying back the result d_c from the device to the host array c
    //Flipped source and destination pointer
    cudaErrorCheck( cudaMemcpy(d_c,c,C*sizeof(int),cudaMemcpyDeviceToHost));

    //Verify result
    check();

    //Freeing GPU memory
    cudaErrorCheck( cudaFree(d_a));
    cudaErrorCheck( cudaFree(d_b));
    cudaErrorCheck( cudaFree(d_c));

    //Freeing CPU memory
    free(a);
    free(b);
    free(c);
    return 0;
}
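The same hypothetical correction applies to the CUDA original; only the two pointer arguments of the last cudaMemcpy swap places.

    //Corrected copy back: host array c receives the device result d_c
    cudaErrorCheck( cudaMemcpy(c,d_c,C*sizeof(int),cudaMemcpyDeviceToHost));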
69137489dcb14953e77b49dad175b911a61488d4.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t,
        LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
69137489dcb14953e77b49dad175b911a61488d4.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t,
        LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
f4dcbb22c99768e4113e4d4e19f6ee71a87abb9c.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/visualization/shader/simple_shader.h" #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/lineset.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/visualizer/render_option.h" #include "cupoch/visualization/utility/color_map.h" #include <thrust/iterator/constant_iterator.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { // Coordinates of 8 vertices in a cuboid (assume origin (0,0,0), size 1) __device__ const int cuboid_vertex_offsets[8][3] = { {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {1, 1, 0}, {0, 0, 1}, {1, 0, 1}, {0, 1, 1}, {1, 1, 1}, }; // Vertex indices of 12 triangles in a cuboid, for right-handed manifold mesh __device__ const int cuboid_triangles_vertex_indices[12][3] = { {0, 2, 1}, {0, 1, 4}, {0, 4, 2}, {5, 1, 7}, {5, 7, 4}, {5, 4, 1}, {3, 7, 1}, {3, 1, 2}, {3, 2, 7}, {6, 4, 7}, {6, 7, 2}, {6, 2, 4}, }; // Vertex indices of 12 lines in a cuboid __device__ const int cuboid_lines_vertex_indices[12][2] = { {0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7}, {5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7}, }; struct copy_pointcloud_functor{ copy_pointcloud_functor(bool has_colors, RenderOption::PointColorOption color_option, const ViewControl& view) : has_colors_(has_colors), color_option_(color_option), view_(view) {}; const bool has_colors_; const RenderOption::PointColorOption color_option_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f>& pt_cl) { const Eigen::Vector3f &point = thrust::get<0>(pt_cl); const Eigen::Vector3f &color = thrust::get<1>(pt_cl); Eigen::Vector3f color_tmp; switch (color_option_) { case RenderOption::PointColorOption::XCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetXPercentage(point(0)), colormap_option_); break; case RenderOption::PointColorOption::YCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetYPercentage(point(1)), colormap_option_); break; case RenderOption::PointColorOption::ZCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); break; case RenderOption::PointColorOption::Color: case RenderOption::PointColorOption::Default: default: if (has_colors_) { color_tmp = color; } else { color_tmp = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); } break; } return thrust::make_tuple(point, color_tmp); } }; struct copy_lineset_functor { copy_lineset_functor(const thrust::pair<Eigen::Vector3f, Eigen::Vector3f>* line_coords, const Eigen::Vector3f* line_colors, bool has_colors) : line_coords_(line_coords), line_colors_(line_colors), has_colors_(has_colors) {}; const thrust::pair<Eigen::Vector3f, Eigen::Vector3f>* line_coords_; const Eigen::Vector3f* line_colors_; const bool has_colors_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { int i = k / 2; int j = k % 2; Eigen::Vector3f color_tmp = (has_colors_) ? 
line_colors_[i] : Eigen::Vector3f::Zero(); if (j == 0) { return thrust::make_tuple(line_coords_[i].first, color_tmp); } else { return thrust::make_tuple(line_coords_[i].second, color_tmp); } } }; struct line_coordinates_functor { line_coordinates_functor(const Eigen::Vector3f* points) : points_(points) {}; const Eigen::Vector3f* points_; __device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator() (const Eigen::Vector2i& idxs) const { return thrust::make_pair(points_[idxs[0]], points_[idxs[1]]); } }; struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const int* triangles, const Eigen::Vector3f* vertex_colors, bool has_vertex_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f& default_mesh_color, const ViewControl& view) : vertices_(vertices), triangles_(triangles), vertex_colors_(vertex_colors), has_vertex_colors_(has_vertex_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view) {}; const Eigen::Vector3f* vertices_; const int* triangles_; const Eigen::Vector3f* vertex_colors_; const bool has_vertex_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { size_t vi = triangles_[k]; const auto& vertex = vertices_[vi]; Eigen::Vector3f color_tmp; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetXPercentage(vertex(0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetYPercentage(vertex(1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(vertex(2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_vertex_colors_) { color_tmp = vertex_colors_[vi]; break; } case RenderOption::MeshColorOption::Default: default: color_tmp = default_mesh_color_; break; } return thrust::make_tuple(vertex, color_tmp); } }; struct compute_voxel_vertices_functor { compute_voxel_vertices_functor(const geometry::Voxel* voxels, const Eigen::Vector3f& origin, float voxel_size) : voxels_(voxels), origin_(origin), voxel_size_(voxel_size) {}; const geometry::Voxel* voxels_; const Eigen::Vector3f origin_; const float voxel_size_; __device__ Eigen::Vector3f operator() (size_t idx) const { int i = idx / 8; int j = idx % 8; const geometry::Voxel &voxel = voxels_[i]; // 8 vertices in a voxel Eigen::Vector3f base_vertex = origin_ + voxel.grid_index_.cast<float>() * voxel_size_; const auto offset_v = Eigen::Vector3f(cuboid_vertex_offsets[j][0], cuboid_vertex_offsets[j][1], cuboid_vertex_offsets[j][2]); return base_vertex + offset_v * voxel_size_; } }; struct copy_voxelgrid_line_functor { copy_voxelgrid_line_functor(const Eigen::Vector3f* vertices, const geometry::Voxel* voxels, bool has_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f& default_mesh_color, const ViewControl& view) : vertices_(vertices), voxels_(voxels), has_colors_(has_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view) {}; const Eigen::Vector3f* vertices_; const geometry::Voxel* voxels_; const bool has_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f 
default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t idx) const { int i = idx / (12 * 2); int jk = idx % (12 * 2); int j = jk / 2; int k = jk % 2; // Voxel color (applied to all points) Eigen::Vector3f voxel_color; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetXPercentage(vertices_[i * 8](0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetYPercentage(vertices_[i * 8](1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(vertices_[i * 8](2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_colors_) { voxel_color = voxels_[i].color_; break; } case RenderOption::MeshColorOption::Default: default: voxel_color = default_mesh_color_; break; } return thrust::make_tuple(vertices_[cuboid_lines_vertex_indices[j][k]], voxel_color); } }; struct copy_voxelgrid_face_functor { copy_voxelgrid_face_functor(const Eigen::Vector3f* vertices, const geometry::Voxel* voxels, bool has_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f& default_mesh_color, const ViewControl& view) : vertices_(vertices), voxels_(voxels), has_colors_(has_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view) {}; const Eigen::Vector3f* vertices_; const geometry::Voxel* voxels_; const bool has_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t idx) const { int i = idx / (12 * 3); int jk = idx % (12 * 3); int j = jk / 3; int k = jk % 3; // Voxel color (applied to all points) Eigen::Vector3f voxel_color; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetXPercentage(vertices_[i * 8](0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetYPercentage(vertices_[i * 8](1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(vertices_[i * 8](2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_colors_) { voxel_color = voxels_[i].color_; break; } case RenderOption::MeshColorOption::Default: default: voxel_color = default_mesh_color_; break; } return thrust::make_tuple(vertices_[cuboid_triangles_vertex_indices[j][k]], voxel_color); } }; } bool SimpleShader::Compile() { if (CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_color_ = glGetAttribLocation(program_, "vertex_color"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void SimpleShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool SimpleShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind 
it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_color_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_color_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_colors_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_colors_ptr = thrust::device_pointer_cast(raw_colors_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_colors_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_color_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glVertexAttribPointer(vertex_color_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_color_); return true; } void SimpleShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_color_buffer_); bound_ = false; } } bool SimpleShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool 
SimpleShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } copy_pointcloud_functor func(pointcloud.HasColors(), option.point_color_option_, view); if (pointcloud.HasColors()) { thrust::transform(make_tuple_iterator(pointcloud.points_.begin(), pointcloud.colors_.begin()), make_tuple_iterator(pointcloud.points_.end(), pointcloud.colors_.end()), make_tuple_iterator(points, colors), func); } else { thrust::transform(make_tuple_iterator(pointcloud.points_.begin(), thrust::constant_iterator<Eigen::Vector3f>(Eigen::Vector3f::Zero())), make_tuple_iterator(pointcloud.points_.end(), thrust::constant_iterator<Eigen::Vector3f>(Eigen::Vector3f::Zero())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t SimpleShaderForPointCloud::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } auto lineset = geometry::LineSet::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); thrust::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset->lines_.size()); line_coordinates_functor func_line(thrust::raw_pointer_cast(lineset->points_.data())); thrust::transform(lineset->lines_.begin(), lineset->lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset->colors_.data()), lineset->HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset->lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2); return true; } size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize(const geometry::Geometry &geometry) const { auto lineset = geometry::LineSet::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); return lineset->lines_.size() * 2; } bool SimpleShaderForTriangleMesh::PrepareRendering( const geometry::Geometry 
&geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool SimpleShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.vertex_colors_.data()), mesh.HasVertexColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, colors), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t SimpleShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } bool SimpleShaderForVoxelGridLine::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForVoxelGridLine::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } const geometry::VoxelGrid &voxel_grid = (const geometry::VoxelGrid &)geometry; if (voxel_grid.HasVoxels() == false) { PrintShaderWarning("Binding failed with empty voxel grid."); return false; } utility::device_vector<Eigen::Vector3f> vertices(voxel_grid.voxels_values_.size() * 8); compute_voxel_vertices_functor func1(thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.origin_, voxel_grid.voxel_size_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(voxel_grid.voxels_values_.size() * 8), vertices.begin(), func1); size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2; copy_voxelgrid_line_functor func2(thrust::raw_pointer_cast(vertices.data()), thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.HasColors(), 
option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_out), make_tuple_iterator(points, colors), func2); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForVoxelGridLine::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 * 2; } bool SimpleShaderForVoxelGridFace::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForVoxelGridFace::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } const geometry::VoxelGrid &voxel_grid = (const geometry::VoxelGrid &)geometry; if (voxel_grid.HasVoxels() == false) { PrintShaderWarning("Binding failed with empty voxel grid."); return false; } utility::device_vector<Eigen::Vector3f> vertices(voxel_grid.voxels_values_.size() * 8); compute_voxel_vertices_functor func1(thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.origin_, voxel_grid.voxel_size_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(voxel_grid.voxels_values_.size() * 8), vertices.begin(), func1); size_t n_out = voxel_grid.voxels_values_.size() * 12 * 3; copy_voxelgrid_face_functor func2(thrust::raw_pointer_cast(vertices.data()), thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.HasColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_out), make_tuple_iterator(points, colors), func2); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForVoxelGridFace::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 * 3; }
f4dcbb22c99768e4113e4d4e19f6ee71a87abb9c.cu
#include "cupoch/visualization/shader/simple_shader.h" #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/lineset.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/visualizer/render_option.h" #include "cupoch/visualization/utility/color_map.h" #include <thrust/iterator/constant_iterator.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { // Coordinates of 8 vertices in a cuboid (assume origin (0,0,0), size 1) __device__ const int cuboid_vertex_offsets[8][3] = { {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {1, 1, 0}, {0, 0, 1}, {1, 0, 1}, {0, 1, 1}, {1, 1, 1}, }; // Vertex indices of 12 triangles in a cuboid, for right-handed manifold mesh __device__ const int cuboid_triangles_vertex_indices[12][3] = { {0, 2, 1}, {0, 1, 4}, {0, 4, 2}, {5, 1, 7}, {5, 7, 4}, {5, 4, 1}, {3, 7, 1}, {3, 1, 2}, {3, 2, 7}, {6, 4, 7}, {6, 7, 2}, {6, 2, 4}, }; // Vertex indices of 12 lines in a cuboid __device__ const int cuboid_lines_vertex_indices[12][2] = { {0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7}, {5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7}, }; struct copy_pointcloud_functor{ copy_pointcloud_functor(bool has_colors, RenderOption::PointColorOption color_option, const ViewControl& view) : has_colors_(has_colors), color_option_(color_option), view_(view) {}; const bool has_colors_; const RenderOption::PointColorOption color_option_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f>& pt_cl) { const Eigen::Vector3f &point = thrust::get<0>(pt_cl); const Eigen::Vector3f &color = thrust::get<1>(pt_cl); Eigen::Vector3f color_tmp; switch (color_option_) { case RenderOption::PointColorOption::XCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetXPercentage(point(0)), colormap_option_); break; case RenderOption::PointColorOption::YCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetYPercentage(point(1)), colormap_option_); break; case RenderOption::PointColorOption::ZCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); break; case RenderOption::PointColorOption::Color: case RenderOption::PointColorOption::Default: default: if (has_colors_) { color_tmp = color; } else { color_tmp = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); } break; } return thrust::make_tuple(point, color_tmp); } }; struct copy_lineset_functor { copy_lineset_functor(const thrust::pair<Eigen::Vector3f, Eigen::Vector3f>* line_coords, const Eigen::Vector3f* line_colors, bool has_colors) : line_coords_(line_coords), line_colors_(line_colors), has_colors_(has_colors) {}; const thrust::pair<Eigen::Vector3f, Eigen::Vector3f>* line_coords_; const Eigen::Vector3f* line_colors_; const bool has_colors_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { int i = k / 2; int j = k % 2; Eigen::Vector3f color_tmp = (has_colors_) ? 
line_colors_[i] : Eigen::Vector3f::Zero(); if (j == 0) { return thrust::make_tuple(line_coords_[i].first, color_tmp); } else { return thrust::make_tuple(line_coords_[i].second, color_tmp); } } }; struct line_coordinates_functor { line_coordinates_functor(const Eigen::Vector3f* points) : points_(points) {}; const Eigen::Vector3f* points_; __device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator() (const Eigen::Vector2i& idxs) const { return thrust::make_pair(points_[idxs[0]], points_[idxs[1]]); } }; struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const int* triangles, const Eigen::Vector3f* vertex_colors, bool has_vertex_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f& default_mesh_color, const ViewControl& view) : vertices_(vertices), triangles_(triangles), vertex_colors_(vertex_colors), has_vertex_colors_(has_vertex_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view) {}; const Eigen::Vector3f* vertices_; const int* triangles_; const Eigen::Vector3f* vertex_colors_; const bool has_vertex_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { size_t vi = triangles_[k]; const auto& vertex = vertices_[vi]; Eigen::Vector3f color_tmp; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetXPercentage(vertex(0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetYPercentage(vertex(1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: color_tmp = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(vertex(2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_vertex_colors_) { color_tmp = vertex_colors_[vi]; break; } case RenderOption::MeshColorOption::Default: default: color_tmp = default_mesh_color_; break; } return thrust::make_tuple(vertex, color_tmp); } }; struct compute_voxel_vertices_functor { compute_voxel_vertices_functor(const geometry::Voxel* voxels, const Eigen::Vector3f& origin, float voxel_size) : voxels_(voxels), origin_(origin), voxel_size_(voxel_size) {}; const geometry::Voxel* voxels_; const Eigen::Vector3f origin_; const float voxel_size_; __device__ Eigen::Vector3f operator() (size_t idx) const { int i = idx / 8; int j = idx % 8; const geometry::Voxel &voxel = voxels_[i]; // 8 vertices in a voxel Eigen::Vector3f base_vertex = origin_ + voxel.grid_index_.cast<float>() * voxel_size_; const auto offset_v = Eigen::Vector3f(cuboid_vertex_offsets[j][0], cuboid_vertex_offsets[j][1], cuboid_vertex_offsets[j][2]); return base_vertex + offset_v * voxel_size_; } }; struct copy_voxelgrid_line_functor { copy_voxelgrid_line_functor(const Eigen::Vector3f* vertices, const geometry::Voxel* voxels, bool has_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f& default_mesh_color, const ViewControl& view) : vertices_(vertices), voxels_(voxels), has_colors_(has_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view) {}; const Eigen::Vector3f* vertices_; const geometry::Voxel* voxels_; const bool has_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f 
default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t idx) const { int i = idx / (12 * 2); int jk = idx % (12 * 2); int j = jk / 2; int k = jk % 2; // Voxel color (applied to all points) Eigen::Vector3f voxel_color; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetXPercentage(vertices_[i * 8](0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetYPercentage(vertices_[i * 8](1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(vertices_[i * 8](2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_colors_) { voxel_color = voxels_[i].color_; break; } case RenderOption::MeshColorOption::Default: default: voxel_color = default_mesh_color_; break; } return thrust::make_tuple(vertices_[cuboid_lines_vertex_indices[j][k]], voxel_color); } }; struct copy_voxelgrid_face_functor { copy_voxelgrid_face_functor(const Eigen::Vector3f* vertices, const geometry::Voxel* voxels, bool has_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f& default_mesh_color, const ViewControl& view) : vertices_(vertices), voxels_(voxels), has_colors_(has_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view) {}; const Eigen::Vector3f* vertices_; const geometry::Voxel* voxels_; const bool has_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t idx) const { int i = idx / (12 * 3); int jk = idx % (12 * 3); int j = jk / 3; int k = jk % 3; // Voxel color (applied to all points) Eigen::Vector3f voxel_color; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetXPercentage(vertices_[i * 8](0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetYPercentage(vertices_[i * 8](1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetZPercentage(vertices_[i * 8](2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_colors_) { voxel_color = voxels_[i].color_; break; } case RenderOption::MeshColorOption::Default: default: voxel_color = default_mesh_color_; break; } return thrust::make_tuple(vertices_[cuboid_triangles_vertex_indices[j][k]], voxel_color); } }; } bool SimpleShader::Compile() { if (CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_color_ = glGetAttribLocation(program_, "vertex_color"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void SimpleShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool SimpleShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind 
it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_color_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_color_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_colors_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_colors_ptr = thrust::device_pointer_cast(raw_colors_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_colors_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_color_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glVertexAttribPointer(vertex_color_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_color_); return true; } void SimpleShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_color_buffer_); bound_ = false; } } bool SimpleShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return 
true; } bool SimpleShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } copy_pointcloud_functor func(pointcloud.HasColors(), option.point_color_option_, view); if (pointcloud.HasColors()) { thrust::transform(make_tuple_iterator(pointcloud.points_.begin(), pointcloud.colors_.begin()), make_tuple_iterator(pointcloud.points_.end(), pointcloud.colors_.end()), make_tuple_iterator(points, colors), func); } else { thrust::transform(make_tuple_iterator(pointcloud.points_.begin(), thrust::constant_iterator<Eigen::Vector3f>(Eigen::Vector3f::Zero())), make_tuple_iterator(pointcloud.points_.end(), thrust::constant_iterator<Eigen::Vector3f>(Eigen::Vector3f::Zero())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t SimpleShaderForPointCloud::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } auto lineset = geometry::LineSet::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); thrust::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset->lines_.size()); line_coordinates_functor func_line(thrust::raw_pointer_cast(lineset->points_.data())); thrust::transform(lineset->lines_.begin(), lineset->lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset->colors_.data()), lineset->HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset->lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2); return true; } size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize(const geometry::Geometry &geometry) const { auto lineset = geometry::LineSet::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); return lineset->lines_.size() * 2; } bool SimpleShaderForTriangleMesh::PrepareRendering( const 
geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool SimpleShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.vertex_colors_.data()), mesh.HasVertexColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, colors), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t SimpleShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } bool SimpleShaderForVoxelGridLine::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForVoxelGridLine::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } const geometry::VoxelGrid &voxel_grid = (const geometry::VoxelGrid &)geometry; if (voxel_grid.HasVoxels() == false) { PrintShaderWarning("Binding failed with empty voxel grid."); return false; } utility::device_vector<Eigen::Vector3f> vertices(voxel_grid.voxels_values_.size() * 8); compute_voxel_vertices_functor func1(thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.origin_, voxel_grid.voxel_size_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(voxel_grid.voxels_values_.size() * 8), vertices.begin(), func1); size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2; copy_voxelgrid_line_functor func2(thrust::raw_pointer_cast(vertices.data()), thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.HasColors(), 
option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_out), make_tuple_iterator(points, colors), func2); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForVoxelGridLine::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 * 2; } bool SimpleShaderForVoxelGridFace::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForVoxelGridFace::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } const geometry::VoxelGrid &voxel_grid = (const geometry::VoxelGrid &)geometry; if (voxel_grid.HasVoxels() == false) { PrintShaderWarning("Binding failed with empty voxel grid."); return false; } utility::device_vector<Eigen::Vector3f> vertices(voxel_grid.voxels_values_.size() * 8); compute_voxel_vertices_functor func1(thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.origin_, voxel_grid.voxel_size_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(voxel_grid.voxels_values_.size() * 8), vertices.begin(), func1); size_t n_out = voxel_grid.voxels_values_.size() * 12 * 3; copy_voxelgrid_face_functor func2(thrust::raw_pointer_cast(vertices.data()), thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.HasColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_out), make_tuple_iterator(points, colors), func2); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForVoxelGridFace::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 * 3; }
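// Minimal sketch of the CUDA/OpenGL interop pattern used by BindGeometry above:
// register the GL buffer once, map it, fetch a device pointer, fill it from the
// device, then unmap so GL can draw from it. The buffer id `vbo` and element
// count `n` are hypothetical; a current GL context is assumed.
#include <thrust/fill.h>
static void FillGLBufferSketch(GLuint vbo, size_t n) {
    cudaGraphicsResource_t res = nullptr;
    cudaSafeCall(cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsMapFlagsNone));
    cudaSafeCall(cudaGraphicsMapResources(1, &res));
    Eigen::Vector3f *raw = nullptr;
    size_t n_bytes = 0;
    cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw, &n_bytes, res));
    thrust::device_ptr<Eigen::Vector3f> dev = thrust::device_pointer_cast(raw);
    // A real binding runs one of the copy functors above; here we just zero-fill.
    thrust::fill(dev, dev + n, Eigen::Vector3f::Zero());
    cudaSafeCall(cudaGraphicsUnmapResources(1, &res));
    cudaSafeCall(cudaGraphicsUnregisterResource(res));
}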
f436e23f5207e7300968a33c6256758f307497bc.hip
// !!! This is a file automatically generated by hipify!!! #define LIMIT -999 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "needle.h" #include <hip/hip_runtime.h> #include <sys/time.h> // includes, kernels #include "needle_kernel.hip" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("WG size of kernel = %d \n", BLOCK_SIZE); runTest( argc, argv); return EXIT_SUCCESS; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); exit(1); } void runTest( int argc, char** argv) { int max_rows, max_cols, penalty; int *input_itemsets, *output_itemsets, *referrence; int *matrix_cuda, *referrence_cuda; int size; // the lengths of the two sequences should be able to divided by 16. 
// And at current stage max_rows needs to equal max_cols if (argc == 3) { max_rows = atoi(argv[1]); max_cols = atoi(argv[1]); penalty = atoi(argv[2]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } max_rows = max_rows + 1; max_cols = max_cols + 1; referrence = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); srand ( 7 ); for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } printf("Start Needleman-Wunsch\n"); for( int i=1; i< max_rows ; i++){ //please define your own sequence. input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //please define your own sequence. input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; size = max_cols * max_rows; hipMalloc((void**)& referrence_cuda, sizeof(int)*size); hipMalloc((void**)& matrix_cuda, sizeof(int)*size); hipMemcpy(referrence_cuda, referrence, sizeof(int) * size, hipMemcpyHostToDevice); hipMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, hipMemcpyHostToDevice); dim3 dimGrid; dim3 dimBlock(BLOCK_SIZE, 1); int block_width = ( max_cols - 1 )/BLOCK_SIZE; printf("Processing top-left matrix\n"); //process top-left matrix for( int i = 1 ; i <= block_width ; i++){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernelGGL(( needle_cuda_shared_1), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda ,max_cols, penalty, i, block_width); } printf("Processing bottom-right matrix\n"); // cke int num_streams = 2; hipStream_t *streams = (hipStream_t *) malloc(num_streams * sizeof(hipStream_t)); for (int i = 0; i < num_streams; i++) hipStreamCreate(&(streams[i])); //process bottom-right matrix for( int i = block_width - 1 ; i >= 1 ; i--){ dimGrid.x = i; dimGrid.y = 1; for(int sid=0; sid < num_streams; sid++) { hipLaunchKernelGGL(( needle_cuda_shared_2), dim3(dimGrid), dim3(dimBlock), 0, streams[sid], referrence_cuda, matrix_cuda ,max_cols, penalty, i, block_width); } } hipMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, hipMemcpyDeviceToHost); //#define TRACEBACK #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value GPU:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + referrence[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == 
new_w) traceback = w; if(traceback == new_n) traceback = n; fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); #endif for (int i = 0; i < num_streams; i++) hipStreamDestroy(streams[i]); hipFree(referrence_cuda); hipFree(matrix_cuda); free(referrence); free(input_itemsets); free(output_itemsets); }
f436e23f5207e7300968a33c6256758f307497bc.cu
#define LIMIT -999 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "needle.h" #include <cuda.h> #include <sys/time.h> // includes, kernels #include "needle_kernel.cu" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("WG size of kernel = %d \n", BLOCK_SIZE); runTest( argc, argv); return EXIT_SUCCESS; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); exit(1); } void runTest( int argc, char** argv) { int max_rows, max_cols, penalty; int *input_itemsets, *output_itemsets, *referrence; int *matrix_cuda, *referrence_cuda; int size; // the lengths of the two sequences should be able to divided by 16. 
// And at current stage max_rows needs to equal max_cols if (argc == 3) { max_rows = atoi(argv[1]); max_cols = atoi(argv[1]); penalty = atoi(argv[2]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } max_rows = max_rows + 1; max_cols = max_cols + 1; referrence = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); srand ( 7 ); for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } printf("Start Needleman-Wunsch\n"); for( int i=1; i< max_rows ; i++){ //please define your own sequence. input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //please define your own sequence. input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; size = max_cols * max_rows; cudaMalloc((void**)& referrence_cuda, sizeof(int)*size); cudaMalloc((void**)& matrix_cuda, sizeof(int)*size); cudaMemcpy(referrence_cuda, referrence, sizeof(int) * size, cudaMemcpyHostToDevice); cudaMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, cudaMemcpyHostToDevice); dim3 dimGrid; dim3 dimBlock(BLOCK_SIZE, 1); int block_width = ( max_cols - 1 )/BLOCK_SIZE; printf("Processing top-left matrix\n"); //process top-left matrix for( int i = 1 ; i <= block_width ; i++){ dimGrid.x = i; dimGrid.y = 1; needle_cuda_shared_1<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda ,max_cols, penalty, i, block_width); } printf("Processing bottom-right matrix\n"); // cke int num_streams = 2; cudaStream_t *streams = (cudaStream_t *) malloc(num_streams * sizeof(cudaStream_t)); for (int i = 0; i < num_streams; i++) cudaStreamCreate(&(streams[i])); //process bottom-right matrix for( int i = block_width - 1 ; i >= 1 ; i--){ dimGrid.x = i; dimGrid.y = 1; for(int sid=0; sid < num_streams; sid++) { needle_cuda_shared_2<<<dimGrid, dimBlock, 0, streams[sid]>>>(referrence_cuda, matrix_cuda ,max_cols, penalty, i, block_width); } } cudaMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, cudaMemcpyDeviceToHost); //#define TRACEBACK #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value GPU:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + referrence[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; 
fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); #endif for (int i = 0; i < num_streams; i++) cudaStreamDestroy(streams[i]); cudaFree(referrence_cuda); cudaFree(matrix_cuda); free(referrence); free(input_itemsets); free(output_itemsets); }
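// The two kernels above sweep the score matrix in anti-diagonal blocks; each
// cell follows the Needleman-Wunsch recurrence over its north-west, north and
// west neighbours. Host-side sketch of the per-cell update (illustrative only;
// `score`/`ref` stand for the flattened score matrix and blosum62 reference values):
static int nw_cell(const int *score, const int *ref, int cols, int i, int j, int penalty)
{
    int diag = score[(i - 1) * cols + (j - 1)] + ref[i * cols + j]; // match/mismatch
    int up   = score[(i - 1) * cols + j] - penalty;                 // gap
    int left = score[i * cols + (j - 1)] - penalty;                 // gap
    int best = (diag > up) ? diag : up;
    return (best > left) ? best : left;
}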
da483065172a3b7812d5604cac40fb137ddefb75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/core_io.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void convert_bf16_f32(cutlass::bfloat16_t *output, float const *input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]); } } __global__ void convert_and_pack_bf16(cutlass::bfloat16_t *output, float const *input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid * 2 < N) { cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert; cutlass::Array<cutlass::bfloat16_t, 2> *dst_ptr = reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2> *>(output + tid * 2); cutlass::Array<float, 2> const *src_ptr = reinterpret_cast<cutlass::Array<float, 2> const *>(input + tid * 2); *dst_ptr = convert(*src_ptr); } } TEST(bfloat16_t, device_conversion) { using T = cutlass::bfloat16_t; using S = float; int const N = 256; cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1}); cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1}); for (int i = 0; i < N; ++i) { source.at({i, 0}) = float(i - 128); destination.at({i, 0}) = T(0); } source.sync_device(); destination.sync_device(); hipLaunchKernelGGL(( convert_bf16_f32), dim3(dim3(1,1)), dim3(dim3(N, 1)) , 0, 0, destination.device_data(), source.device_data(), N); ASSERT_EQ(hipGetLastError(), hipSuccess) << "Kernel launch error."; destination.sync_host(); int errors = 0; for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Basic conversion error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } destination.at({i, 0}) = T(0); } destination.sync_device(); hipLaunchKernelGGL(( convert_and_pack_bf16), dim3(dim3(1,1)), dim3(dim3(N, 1)) , 0, 0, destination.device_data(), source.device_data(), N); ASSERT_EQ(hipGetLastError(), hipSuccess) << "Kernel launch error."; destination.sync_host(); for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Convert and pack error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } } EXPECT_EQ(errors, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Host // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(bfloat16_t, host_conversion) { for (int i = -128; i < 128; ++i) { float f = static_cast<float>(i); cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f); EXPECT_TRUE(static_cast<int>(x) == i); EXPECT_TRUE(static_cast<float>(y) == f); } // Try out default-ctor (zero initialization of primitive proxy type) EXPECT_TRUE(cutlass::bfloat16_t() == 0.0_bf16); // Try out user-defined literals EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16); EXPECT_TRUE(7 == static_cast<int>(7_bf16)); } TEST(bfloat16_t, host_arithmetic) { for (int i = -100; i < 100; ++i) { for (int j = -100; j < 100; ++j) { cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = 
static_cast<cutlass::bfloat16_t>(j); EXPECT_TRUE(static_cast<int>(x + y) == (i + j)); } } } TEST(bfloat16_t, host_round) { struct { uint32_t f32_bits; uint16_t expected; } tests[] = { {0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz {0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz {0x40040001, 0x4004}, // M=0, R=1, S=1 => +inf {0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf {0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf {0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz {0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz {0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf {0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf {0x7f800000, 0x7f80}, // +inf {0xff800000, 0xff80}, // -inf {0x7fffffff, 0x7fff}, // canonical NaN {0x7ff00001, 0x7fff}, // NaN -> canonical NaN {0xfff00010, 0x7fff}, // Nan -> canonical NaN {0, 0} }; bool running = true; for (int i = 0; running; ++i) { float f32 = reinterpret_cast<float const &>(tests[i].f32_bits); cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32); bool passed = (tests[i].expected == bf16.raw()); EXPECT_TRUE(passed) << "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits << ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x" << std::hex << bf16.raw(); if (!tests[i].f32_bits) { running = false; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Device // /////////////////////////////////////////////////////////////////////////////////////////////////
da483065172a3b7812d5604cac40fb137ddefb75.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/core_io.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void convert_bf16_f32(cutlass::bfloat16_t *output, float const *input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]); } } __global__ void convert_and_pack_bf16(cutlass::bfloat16_t *output, float const *input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid * 2 < N) { cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert; cutlass::Array<cutlass::bfloat16_t, 2> *dst_ptr = reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2> *>(output + tid * 2); cutlass::Array<float, 2> const *src_ptr = reinterpret_cast<cutlass::Array<float, 2> const *>(input + tid * 2); *dst_ptr = convert(*src_ptr); } } TEST(bfloat16_t, device_conversion) { using T = cutlass::bfloat16_t; using S = float; int const N = 256; cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1}); cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1}); for (int i = 0; i < N; ++i) { source.at({i, 0}) = float(i - 128); destination.at({i, 0}) = T(0); } source.sync_device(); destination.sync_device(); convert_bf16_f32<<< dim3(1,1), dim3(N, 1) >>>(destination.device_data(), source.device_data(), N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; destination.sync_host(); int errors = 0; for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Basic conversion error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } destination.at({i, 0}) = T(0); } destination.sync_device(); convert_and_pack_bf16<<< dim3(1,1), dim3(N, 1) >>>(destination.device_data(), source.device_data(), N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; destination.sync_host(); for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Convert and pack error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } } EXPECT_EQ(errors, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Host // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(bfloat16_t, host_conversion) { for (int i = -128; i < 128; ++i) { float f = static_cast<float>(i); cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f); EXPECT_TRUE(static_cast<int>(x) == i); EXPECT_TRUE(static_cast<float>(y) == f); } // Try out default-ctor (zero initialization of primitive proxy type) EXPECT_TRUE(cutlass::bfloat16_t() == 0.0_bf16); // Try out user-defined literals EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16); EXPECT_TRUE(7 == static_cast<int>(7_bf16)); } TEST(bfloat16_t, host_arithmetic) { for (int i = -100; i < 100; ++i) { for (int j = -100; j < 100; ++j) { cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(j); EXPECT_TRUE(static_cast<int>(x + y) == (i + j)); 
} } } TEST(bfloat16_t, host_round) { struct { uint32_t f32_bits; uint16_t expected; } tests[] = { {0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz {0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz {0x40040001, 0x4004}, // M=0, R=1, S=1 => +inf {0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf {0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf {0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz {0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz {0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf {0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf {0x7f800000, 0x7f80}, // +inf {0xff800000, 0xff80}, // -inf {0x7fffffff, 0x7fff}, // canonical NaN {0x7ff00001, 0x7fff}, // NaN -> canonical NaN {0xfff00010, 0x7fff}, // Nan -> canonical NaN {0, 0} }; bool running = true; for (int i = 0; running; ++i) { float f32 = reinterpret_cast<float const &>(tests[i].f32_bits); cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32); bool passed = (tests[i].expected == bf16.raw()); EXPECT_TRUE(passed) << "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits << ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x" << std::hex << bf16.raw(); if (!tests[i].f32_bits) { running = false; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Device // /////////////////////////////////////////////////////////////////////////////////////////////////
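// The host_round table above encodes round-to-nearest-even truncation of the low
// 16 mantissa bits (M = last kept bit, R = round bit, S = sticky bits), with NaNs
// collapsed to the canonical 0x7fff. A bit-level sketch of that rounding rule
// (an illustrative helper, not a CUTLASS API):
#include <cstdint>
#include <cstring>
static uint16_t round_f32_to_bf16_bits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  if ((bits & 0x7f800000u) == 0x7f800000u && (bits & 0x007fffffu)) {
    return 0x7fffu;                      // any NaN -> canonical NaN
  }
  uint32_t lsb = (bits >> 16) & 1u;      // M: the last bit that survives
  bits += 0x7fffu + lsb;                 // round to nearest, ties to even
  return static_cast<uint16_t>(bits >> 16);
}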
6adb30fbf315b2811d79bdc65f481e4c82d1781d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ParallelSolver.hpp" //Define device variables for stopping condition __device__ unsigned int d_not_tolerent; __device__ double d_marker; __device__ unsigned int d_same; __device__ unsigned int d_pos_of_same; __global__ void reset_d_not_tolerent (){ d_not_tolerent = 0; } //Calculate jacobi step for each element seperatly __global__ void calc_jacobi_step(int n,double *A,double *b,double *x, double *residual){ int i = blockIdx.x * blockDim.x + threadIdx.x; double new_component=0; double zw = 0; for(int j=0;j<n;j++){ zw += A[j*n+i]*x[j]; } if(A[i*n+i]!= 0.0){ new_component = (b[i]- zw)/A[i*n+i]+x[i]; residual[i]=new_component-x[i]; }else{ residual[i]=0; } } //Check if solution has converged and update new solution __global__ void update_and_check_tol(double *x, double *residual,double tol){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(std::abs(residual[i])>tol){ if(d_marker !=residual[i] || d_pos_of_same != i){ d_not_tolerent=1; d_marker = residual[i]; d_pos_of_same =i; d_same =0; }else{ if(d_same<10){ d_not_tolerent=1; } d_same++; } } x[i]+=residual[i]; } //external functions namespace CUDA { Eigen::VectorXd parallel_LU_pivot(Eigen::MatrixXd &A,Eigen::VectorXd &b){ hipsolverDnHandle_t cusolverH = NULL; hipStream_t stream = NULL; cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS; hipError_t cudaStat1 = hipSuccess; hipError_t cudaStat2 = hipSuccess; hipError_t cudaStat3 = hipSuccess; hipError_t cudaStat4 = hipSuccess; const int m = A.cols(); const int lda = A.cols(); const int ldb = b.rows(); Eigen::VectorXd x=Eigen::VectorXd::Zero(m); // x = A\B int info = 0; // host copy of error info double *d_A = nullptr; // device copy of A double *d_b = nullptr; // device copy of B int *d_Ipiv = nullptr; // pivoting sequence int *d_info = nullptr; // error info int lwork = 0; // size of workspace double *d_work = nullptr; // device workspace for getrf status = hipsolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == status); cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); assert(hipSuccess == cudaStat1); status = hipsolverDnSetStream(cusolverH, stream); assert(CUSOLVER_STATUS_SUCCESS == status); ///////////////// // Copy to GPU // ///////////////// cudaStat1 = hipMalloc ((void**)&d_A, sizeof(double) * lda * m); cudaStat2 = hipMalloc ((void**)&d_b, sizeof(double) * m); cudaStat3 = hipMalloc ((void**)&d_Ipiv, sizeof(int) * m); cudaStat4 = hipMalloc ((void**)&d_info, sizeof(int)); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); assert(hipSuccess == cudaStat3); assert(hipSuccess == cudaStat4); cudaStat1 = hipMemcpy(d_A, A.data(), sizeof(double)*lda*m, hipMemcpyHostToDevice); cudaStat2 = hipMemcpy(d_b, b.data(), sizeof(double)*m, hipMemcpyHostToDevice); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); ///////////////////// // Query workspace // ///////////////////// status = hipsolverDnDgetrf_bufferSize( cusolverH, m, m, d_A, lda, &lwork); assert(CUSOLVER_STATUS_SUCCESS == status); cudaStat1 = hipMalloc((void**)&d_work, sizeof(double)*lwork); assert(hipSuccess == cudaStat1); ////////////////////// // LU factorization // ////////////////////// status = hipsolverDnDgetrf( cusolverH, m, m, d_A, lda, d_work, d_Ipiv, d_info); cudaStat1 = hipDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == status); assert(hipSuccess == cudaStat1); cudaStat1 = hipMemcpy(&info, d_info, sizeof(int), hipMemcpyDeviceToHost); assert(hipSuccess == cudaStat1); 
///////////////////// // solve A*x = b // ///////////////////// status = hipsolverDnDgetrs( cusolverH, HIPBLAS_OP_N, m, 1, /* nrhs */ d_A, lda, d_Ipiv, d_b, ldb, d_info); cudaStat1 = hipDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == status); assert(hipSuccess == cudaStat1); cudaStat1 = hipMemcpy(x.data(), d_b, sizeof(double)*m, hipMemcpyDeviceToHost); assert(hipSuccess == cudaStat1); ///////////////////// // free recourses // ///////////////////// if (d_A ) hipFree(d_A); if (d_b ) hipFree(d_b); if (d_Ipiv ) hipFree(d_Ipiv); if (d_info ) hipFree(d_info); if (d_work ) hipFree(d_work); if (cusolverH ) hipsolverDnDestroy(cusolverH); if (stream ) hipStreamDestroy(stream); hipDeviceReset(); return x; } Eigen::VectorXd parallel_Jacobi_method(Eigen::MatrixXd &A,Eigen::VectorXd &b,double error){ hipError_t cudaStat1 = hipSuccess; hipError_t cudaStat2 = hipSuccess; hipError_t cudaStat3 = hipSuccess; hipError_t cudaStat4 = hipSuccess; hipError_t cudaStat5 = hipSuccess; hipError_t cudaStat6 = hipSuccess; hipError_t cudaStat7 = hipSuccess; int n = A.cols(); Eigen::VectorXd x_0 = b; double *d_A = nullptr; // device copy of A double *d_b = nullptr; // device copy of b double *d_x = nullptr; // iterative solution double *d_residual = nullptr; bool *d_isfinished = nullptr; bool *d_component_finished =nullptr; ///////////////// // Copy to GPU // ///////////////// cudaStat1 = hipMalloc (&d_A, sizeof(double)*n*n); cudaStat2 = hipMalloc (&d_b, sizeof(double)*n); cudaStat3 = hipMalloc (&d_x, sizeof(double)*n); cudaStat4 = hipMalloc (&d_residual, sizeof(double)*n); cudaStat5 = hipMalloc (&d_isfinished, sizeof(bool)); // cudaStat6 = hipMalloc (&d_n, sizeof(int)); cudaStat7 = hipMalloc (&d_component_finished, sizeof(bool)*n); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); assert(hipSuccess == cudaStat3); assert(hipSuccess == cudaStat4); assert(hipSuccess == cudaStat5); assert(hipSuccess == cudaStat6); assert(hipSuccess == cudaStat7); cudaStat1 = hipMemcpy(d_A, A.data(), sizeof(double)*n*n, hipMemcpyHostToDevice); cudaStat2 = hipMemcpy(d_b, b.data(), sizeof(double)*n, hipMemcpyHostToDevice); cudaStat3 = hipMemcpy(d_x, x_0.data(), sizeof(double)*n, hipMemcpyHostToDevice); // cudaStat4 = hipMemcpy(d_x, &n, sizeof(int), hipMemcpyHostToDevice); assert(hipSuccess == cudaStat1); assert(hipSuccess == cudaStat2); assert(hipSuccess == cudaStat3); int blockSize = 16; //best performance for 16 threads int numBlocks = (n + blockSize - 1) / blockSize; int stop_after =100000; int counter = 0; typeof(d_not_tolerent) h_not_tolerent=1; //////////////////////////// // Calculate jacobi steps // //////////////////////////// while(counter < stop_after && h_not_tolerent){ hipLaunchKernelGGL(( calc_jacobi_step), dim3(numBlocks), dim3(blockSize), 0, 0, n,d_A,d_b,d_x,d_residual); hipLaunchKernelGGL(( update_and_check_tol), dim3(numBlocks), dim3(blockSize), 0, 0, d_x, d_residual, error); if(counter%10 ==0){ hipMemcpyFromSymbol(&h_not_tolerent, d_not_tolerent, sizeof(d_not_tolerent)); hipLaunchKernelGGL(( reset_d_not_tolerent), dim3(1), dim3(1), 0, 0, ); } counter++; } //Copy solution to host cudaStat1 = hipMemcpy(x_0.data(), d_x, sizeof(double)*n, hipMemcpyDeviceToHost); assert(hipSuccess == cudaStat1); ///////////////////// // free recourses // ///////////////////// if (d_A ) hipFree(d_A); if (d_b ) hipFree(d_b); if (d_x ) hipFree(d_x); if (d_residual ) hipFree(d_residual); if (d_isfinished ) hipFree(d_isfinished); if (d_component_finished ) hipFree(d_component_finished); hipDeviceReset(); return x_0; } }
6adb30fbf315b2811d79bdc65f481e4c82d1781d.cu
#include "ParallelSolver.hpp" //Define device variables for stopping condition __device__ unsigned int d_not_tolerent; __device__ double d_marker; __device__ unsigned int d_same; __device__ unsigned int d_pos_of_same; __global__ void reset_d_not_tolerent (){ d_not_tolerent = 0; } //Calculate jacobi step for each element seperatly __global__ void calc_jacobi_step(int n,double *A,double *b,double *x, double *residual){ int i = blockIdx.x * blockDim.x + threadIdx.x; double new_component=0; double zw = 0; for(int j=0;j<n;j++){ zw += A[j*n+i]*x[j]; } if(A[i*n+i]!= 0.0){ new_component = (b[i]- zw)/A[i*n+i]+x[i]; residual[i]=new_component-x[i]; }else{ residual[i]=0; } } //Check if solution has converged and update new solution __global__ void update_and_check_tol(double *x, double *residual,double tol){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(std::abs(residual[i])>tol){ if(d_marker !=residual[i] || d_pos_of_same != i){ d_not_tolerent=1; d_marker = residual[i]; d_pos_of_same =i; d_same =0; }else{ if(d_same<10){ d_not_tolerent=1; } d_same++; } } x[i]+=residual[i]; } //external functions namespace CUDA { Eigen::VectorXd parallel_LU_pivot(Eigen::MatrixXd &A,Eigen::VectorXd &b){ cusolverDnHandle_t cusolverH = NULL; cudaStream_t stream = NULL; cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS; cudaError_t cudaStat1 = cudaSuccess; cudaError_t cudaStat2 = cudaSuccess; cudaError_t cudaStat3 = cudaSuccess; cudaError_t cudaStat4 = cudaSuccess; const int m = A.cols(); const int lda = A.cols(); const int ldb = b.rows(); Eigen::VectorXd x=Eigen::VectorXd::Zero(m); // x = A\B int info = 0; // host copy of error info double *d_A = nullptr; // device copy of A double *d_b = nullptr; // device copy of B int *d_Ipiv = nullptr; // pivoting sequence int *d_info = nullptr; // error info int lwork = 0; // size of workspace double *d_work = nullptr; // device workspace for getrf status = cusolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == status); cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); assert(cudaSuccess == cudaStat1); status = cusolverDnSetStream(cusolverH, stream); assert(CUSOLVER_STATUS_SUCCESS == status); ///////////////// // Copy to GPU // ///////////////// cudaStat1 = cudaMalloc ((void**)&d_A, sizeof(double) * lda * m); cudaStat2 = cudaMalloc ((void**)&d_b, sizeof(double) * m); cudaStat3 = cudaMalloc ((void**)&d_Ipiv, sizeof(int) * m); cudaStat4 = cudaMalloc ((void**)&d_info, sizeof(int)); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); assert(cudaSuccess == cudaStat4); cudaStat1 = cudaMemcpy(d_A, A.data(), sizeof(double)*lda*m, cudaMemcpyHostToDevice); cudaStat2 = cudaMemcpy(d_b, b.data(), sizeof(double)*m, cudaMemcpyHostToDevice); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); ///////////////////// // Query workspace // ///////////////////// status = cusolverDnDgetrf_bufferSize( cusolverH, m, m, d_A, lda, &lwork); assert(CUSOLVER_STATUS_SUCCESS == status); cudaStat1 = cudaMalloc((void**)&d_work, sizeof(double)*lwork); assert(cudaSuccess == cudaStat1); ////////////////////// // LU factorization // ////////////////////// status = cusolverDnDgetrf( cusolverH, m, m, d_A, lda, d_work, d_Ipiv, d_info); cudaStat1 = cudaDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == status); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaMemcpy(&info, d_info, sizeof(int), cudaMemcpyDeviceToHost); assert(cudaSuccess == cudaStat1); ///////////////////// // solve A*x = b // ///////////////////// status 
= cusolverDnDgetrs( cusolverH, CUBLAS_OP_N, m, 1, /* nrhs */ d_A, lda, d_Ipiv, d_b, ldb, d_info); cudaStat1 = cudaDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == status); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaMemcpy(x.data(), d_b, sizeof(double)*m, cudaMemcpyDeviceToHost); assert(cudaSuccess == cudaStat1); ///////////////////// // free recourses // ///////////////////// if (d_A ) cudaFree(d_A); if (d_b ) cudaFree(d_b); if (d_Ipiv ) cudaFree(d_Ipiv); if (d_info ) cudaFree(d_info); if (d_work ) cudaFree(d_work); if (cusolverH ) cusolverDnDestroy(cusolverH); if (stream ) cudaStreamDestroy(stream); cudaDeviceReset(); return x; } Eigen::VectorXd parallel_Jacobi_method(Eigen::MatrixXd &A,Eigen::VectorXd &b,double error){ cudaError_t cudaStat1 = cudaSuccess; cudaError_t cudaStat2 = cudaSuccess; cudaError_t cudaStat3 = cudaSuccess; cudaError_t cudaStat4 = cudaSuccess; cudaError_t cudaStat5 = cudaSuccess; cudaError_t cudaStat6 = cudaSuccess; cudaError_t cudaStat7 = cudaSuccess; int n = A.cols(); Eigen::VectorXd x_0 = b; double *d_A = nullptr; // device copy of A double *d_b = nullptr; // device copy of b double *d_x = nullptr; // iterative solution double *d_residual = nullptr; bool *d_isfinished = nullptr; bool *d_component_finished =nullptr; ///////////////// // Copy to GPU // ///////////////// cudaStat1 = cudaMalloc (&d_A, sizeof(double)*n*n); cudaStat2 = cudaMalloc (&d_b, sizeof(double)*n); cudaStat3 = cudaMalloc (&d_x, sizeof(double)*n); cudaStat4 = cudaMalloc (&d_residual, sizeof(double)*n); cudaStat5 = cudaMalloc (&d_isfinished, sizeof(bool)); // cudaStat6 = cudaMalloc (&d_n, sizeof(int)); cudaStat7 = cudaMalloc (&d_component_finished, sizeof(bool)*n); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); assert(cudaSuccess == cudaStat4); assert(cudaSuccess == cudaStat5); assert(cudaSuccess == cudaStat6); assert(cudaSuccess == cudaStat7); cudaStat1 = cudaMemcpy(d_A, A.data(), sizeof(double)*n*n, cudaMemcpyHostToDevice); cudaStat2 = cudaMemcpy(d_b, b.data(), sizeof(double)*n, cudaMemcpyHostToDevice); cudaStat3 = cudaMemcpy(d_x, x_0.data(), sizeof(double)*n, cudaMemcpyHostToDevice); // cudaStat4 = cudaMemcpy(d_x, &n, sizeof(int), cudaMemcpyHostToDevice); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); int blockSize = 16; //best performance for 16 threads int numBlocks = (n + blockSize - 1) / blockSize; int stop_after =100000; int counter = 0; typeof(d_not_tolerent) h_not_tolerent=1; //////////////////////////// // Calculate jacobi steps // //////////////////////////// while(counter < stop_after && h_not_tolerent){ calc_jacobi_step<<<numBlocks, blockSize>>>(n,d_A,d_b,d_x,d_residual); update_and_check_tol<<<numBlocks, blockSize>>>(d_x, d_residual, error); if(counter%10 ==0){ cudaMemcpyFromSymbol(&h_not_tolerent, d_not_tolerent, sizeof(d_not_tolerent)); reset_d_not_tolerent<<<1, 1>>>(); } counter++; } //Copy solution to host cudaStat1 = cudaMemcpy(x_0.data(), d_x, sizeof(double)*n, cudaMemcpyDeviceToHost); assert(cudaSuccess == cudaStat1); ///////////////////// // free recourses // ///////////////////// if (d_A ) cudaFree(d_A); if (d_b ) cudaFree(d_b); if (d_x ) cudaFree(d_x); if (d_residual ) cudaFree(d_residual); if (d_isfinished ) cudaFree(d_isfinished); if (d_component_finished ) cudaFree(d_component_finished); cudaDeviceReset(); return x_0; } }
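// calc_jacobi_step/update_and_check_tol above implement the residual form of a
// Jacobi sweep: x_i <- x_i + (b_i - (A x)_i) / a_ii, which is algebraically the
// textbook update x_i^(k+1) = (b_i - sum_{j != i} a_ij x_j^(k)) / a_ii. Host-side
// sketch of one sweep over a column-major n x n matrix (illustrative reference,
// not part of the solver interface):
#include <vector>
static void jacobi_sweep_host(int n, const double *A, const double *b, double *x) {
    std::vector<double> x_new(x, x + n);
    for (int i = 0; i < n; ++i) {
        double ax_i = 0.0;
        for (int j = 0; j < n; ++j) ax_i += A[j * n + i] * x[j];   // (A x)_i, column-major
        if (A[i * n + i] != 0.0) x_new[i] = x[i] + (b[i] - ax_i) / A[i * n + i];
    }
    for (int i = 0; i < n; ++i) x[i] = x_new[i];
}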
2a12690fd41ead1d1735999f7ee9a2ac8fdf75ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void staticReverse(int *d, int n) { __shared__ int s[64]; int t = threadIdx.x; int tr = n-t-1; s[t] = d[t]; __syncthreads(); d[t] = s[tr]; } __global__ void dynamicReverse(int *d, int n) { extern __shared__ int s[]; int t = threadIdx.x; int tr = n-t-1; s[t] = d[t]; __syncthreads(); d[t] = s[tr]; } int main(void) { const int n = 64; int a[n], r[n], d[n]; for (int i = 0; i < n; i++) { a[i] = i; r[i] = n-i-1; d[i] = 0; } int *d_d; hipMalloc(&d_d, n * sizeof(int)); // run version with static shared memory hipMemcpy(d_d, a, n*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( staticReverse), dim3(1),dim3(n), 0, 0, d_d, n); hipMemcpy(d, d_d, n*sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)n", i, i, d[i], r[i]); // run dynamic shared memory version hipMemcpy(d_d, a, n*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( dynamicReverse), dim3(1),dim3(n),n*sizeof(int), 0, d_d, n); hipMemcpy(d, d_d, n * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < n; i++){ if (d[i] != r[i]) { printf("Error: d[%d]!=r[%d] (%d, %d)n", i, i, d[i], r[i]); } else{ printf("Correct result at %d index.\n", i); } } }
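// ---------------------------------------------------------------------------
// Note on the hipify output above (added for illustration). hipify rewrites a
// CUDA triple-chevron launch  kernel<<<grid, block, shmemBytes, stream>>>(args)
// into the portable macro
//     hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), shmemBytes, stream, args...);
// so, for the dynamic-shared-memory kernel in this file, the two forms below
// are equivalent launches:
//
//     dynamicReverse<<<1, n, n * sizeof(int)>>>(d_d, n);                          // CUDA
//     hipLaunchKernelGGL(dynamicReverse, dim3(1), dim3(n), n * sizeof(int), 0,    // HIP
//                        d_d, n);
// ---------------------------------------------------------------------------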
2a12690fd41ead1d1735999f7ee9a2ac8fdf75ca.cu
#include "cuda.h" #include <stdio.h> #include <stdlib.h> __global__ void staticReverse(int *d, int n) { __shared__ int s[64]; int t = threadIdx.x; int tr = n-t-1; s[t] = d[t]; __syncthreads(); d[t] = s[tr]; } __global__ void dynamicReverse(int *d, int n) { extern __shared__ int s[]; int t = threadIdx.x; int tr = n-t-1; s[t] = d[t]; __syncthreads(); d[t] = s[tr]; } int main(void) { const int n = 64; int a[n], r[n], d[n]; for (int i = 0; i < n; i++) { a[i] = i; r[i] = n-i-1; d[i] = 0; } int *d_d; cudaMalloc(&d_d, n * sizeof(int)); // run version with static shared memory cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice); staticReverse<<<1,n>>>(d_d, n); cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++) if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)n", i, i, d[i], r[i]); // run dynamic shared memory version cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice); dynamicReverse<<<1,n,n*sizeof(int)>>>(d_d, n); cudaMemcpy(d, d_d, n * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++){ if (d[i] != r[i]) { printf("Error: d[%d]!=r[%d] (%d, %d)n", i, i, d[i], r[i]); } else{ printf("Correct result at %d index.\n", i); } } }
04285dfee952b4dde68f11ad62870ee8a04636c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Voxel sampling GPU implementation * Author Zhaoyu SU * All Rights Reserved. Sep., 2019. */ #include <stdio.h> #include <iostream> #include <float.h> __device__ int get_batch_id(int* accu_list, int batch_size, int id) { for (int b=0; b<batch_size-1; b++) { if (id >= accu_list[b]) { if(id < accu_list[b+1]) return b; } } return batch_size - 1; } __global__ void bev_occupy_gpu_kernel(int batch_size, int input_point_num, int output_w, int output_l, float resolution, const float* input_coors, const int* input_num_list, int* input_accu_list, int* output_occupy) { const int output_img_size = output_w * output_l; int point_id = threadIdx.x + blockIdx.x * blockDim.x; if (point_id < input_point_num) { int center_grid_coor_x = (int)floor(input_coors[point_id*3 + 0] / resolution); int center_grid_coor_y = (int)floor(input_coors[point_id*3 + 1] / resolution); int batch_id = get_batch_id(input_accu_list, batch_size, point_id); int output_idx = batch_id * output_img_size + center_grid_coor_x * output_l + center_grid_coor_y; atomicAdd(&output_occupy[output_idx], 1); } } void bev_occupy_gpu_launcher(int batch_size, int input_point_num, int output_w, int output_l, float resolution, const float* input_coors, const int* input_num_list, int* input_accu_list, int* output_occupy) { if (batch_size*input_point_num <=0) { printf("BevOccupyOp ERROR: Invalid CUDA input dimensions.\n"); return; } int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, bev_occupy_gpu_kernel, 0, input_point_num); gridSize = (input_point_num + blockSize - 1) / blockSize; hipLaunchKernelGGL(( bev_occupy_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, batch_size, input_point_num, output_w, output_l, resolution, input_coors, input_num_list, input_accu_list, output_occupy); }
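// ---------------------------------------------------------------------------
// Note on the launch configuration above (added for illustration).
// hipOccupancyMaxPotentialBlockSize (like its CUDA counterpart
// cudaOccupancyMaxPotentialBlockSize in the .cu file) returns a block size that
// maximizes theoretical occupancy for the given kernel, plus the minimum grid
// size needed to fully load the device; the launcher then derives the actual
// grid size from the problem size with a ceiling division:
//
//     hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
//                                       bev_occupy_gpu_kernel,
//                                       0 /* dynamic shared mem */,
//                                       input_point_num /* block size limit */);
//     gridSize = (input_point_num + blockSize - 1) / blockSize;
// ---------------------------------------------------------------------------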
04285dfee952b4dde68f11ad62870ee8a04636c7.cu
/* Voxel sampling GPU implementation * Author Zhaoyu SU * All Rights Reserved. Sep., 2019. */ #include <stdio.h> #include <iostream> #include <float.h> __device__ int get_batch_id(int* accu_list, int batch_size, int id) { for (int b=0; b<batch_size-1; b++) { if (id >= accu_list[b]) { if(id < accu_list[b+1]) return b; } } return batch_size - 1; } __global__ void bev_occupy_gpu_kernel(int batch_size, int input_point_num, int output_w, int output_l, float resolution, const float* input_coors, const int* input_num_list, int* input_accu_list, int* output_occupy) { const int output_img_size = output_w * output_l; int point_id = threadIdx.x + blockIdx.x * blockDim.x; if (point_id < input_point_num) { int center_grid_coor_x = (int)floor(input_coors[point_id*3 + 0] / resolution); int center_grid_coor_y = (int)floor(input_coors[point_id*3 + 1] / resolution); int batch_id = get_batch_id(input_accu_list, batch_size, point_id); int output_idx = batch_id * output_img_size + center_grid_coor_x * output_l + center_grid_coor_y; atomicAdd(&output_occupy[output_idx], 1); } } void bev_occupy_gpu_launcher(int batch_size, int input_point_num, int output_w, int output_l, float resolution, const float* input_coors, const int* input_num_list, int* input_accu_list, int* output_occupy) { if (batch_size*input_point_num <=0) { printf("BevOccupyOp ERROR: Invalid CUDA input dimensions.\n"); return; } int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, bev_occupy_gpu_kernel, 0, input_point_num); gridSize = (input_point_num + blockSize - 1) / blockSize; bev_occupy_gpu_kernel<<<gridSize, blockSize>>>(batch_size, input_point_num, output_w, output_l, resolution, input_coors, input_num_list, input_accu_list, output_occupy); }
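// ---------------------------------------------------------------------------
// Hedged usage sketch (added for illustration; not part of the original file).
// One possible way to drive bev_occupy_gpu_launcher for a single batch of
// `npoints` points whose (x, y, z) coordinates are packed as float triples.
// Buffer layouts are assumptions read off the kernel: input_coors is indexed as
// point_id*3 + {0,1}, input_accu_list holds per-batch exclusive prefix sums of
// point counts, and output_occupy is a batch_size * output_w * output_l grid of
// counters. Coordinates are assumed non-negative and inside the grid.
// ---------------------------------------------------------------------------
#include <vector>

void example_bev_occupy(const std::vector<float>& h_coors,   // 3 * npoints floats
                        int npoints, int output_w, int output_l, float resolution)
{
    const int batch_size = 1;
    float* d_coors = nullptr;
    int*   d_num_list = nullptr;
    int*   d_accu_list = nullptr;
    int*   d_occupy = nullptr;

    cudaMalloc(&d_coors, sizeof(float) * 3 * npoints);
    cudaMalloc(&d_num_list, sizeof(int) * batch_size);
    cudaMalloc(&d_accu_list, sizeof(int) * batch_size);
    cudaMalloc(&d_occupy, sizeof(int) * batch_size * output_w * output_l);

    int h_num_list[1]  = { npoints };
    int h_accu_list[1] = { 0 };   // exclusive prefix sum of per-batch point counts
    cudaMemcpy(d_coors, h_coors.data(), sizeof(float) * 3 * npoints, cudaMemcpyHostToDevice);
    cudaMemcpy(d_num_list, h_num_list, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_accu_list, h_accu_list, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_occupy, 0, sizeof(int) * batch_size * output_w * output_l);

    bev_occupy_gpu_launcher(batch_size, npoints, output_w, output_l, resolution,
                            d_coors, d_num_list, d_accu_list, d_occupy);
    cudaDeviceSynchronize();

    cudaFree(d_coors);
    cudaFree(d_num_list);
    cudaFree(d_accu_list);
    cudaFree(d_occupy);
}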
3ef0a028599df7aedac5cdee655741b0c321af42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Prerequisites.cuh" #include "CTF.cuh" #include "Generics.cuh" #include "Helper.cuh" #include "DeviceFunctions.cuh" namespace gtom { template<int maxbins> __global__ void CTFRotationalAverageKernel(tfloat* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params); template<class T> __global__ void CTFRotationalAverageToTargetKernel(T* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda); template<int maxbins> __global__ void CTFRotationalAverageToTargetDeterministicKernel(tfloat* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, ushort numbins, ushort freqlow, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda); //////////////////////////////////////////////////////////// //Correct the CTF function to make all amplitudes positive// //////////////////////////////////////////////////////////// void d_CTFRotationalAverage(tfloat* d_re, int2 dimsinput, CTFParams* h_params, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch) { float2* h_targetcoords = (float2*)malloc(ElementsFFT2(dimsinput) * sizeof(float2)); float invhalfsize = 1.0f / (float)dimsinput.x; float center = dimsinput.x / 2; for (int y = 0; y < dimsinput.y; y++) { for (int x = 0; x < ElementsFFT1(dimsinput.x); x++) { float2 point = make_float2(x - center, y - center); float angle = atan2(point.y, point.x); h_targetcoords[y * ElementsFFT1(dimsinput.x) + x] = make_float2(sqrt(point.x * point.x + point.y * point.y) * invhalfsize, angle); } } float2* d_targetcoords = (float2*)CudaMallocFromHostArray(h_targetcoords, ElementsFFT2(dimsinput) * sizeof(float2)); free(h_targetcoords); d_CTFRotationalAverage(d_re, d_targetcoords, ElementsFFT2(dimsinput), dimsinput.x, h_params, d_average, freqlow, freqhigh, batch); hipFree(d_targetcoords); } void d_CTFRotationalAverage(tfloat* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch) { uint numbins = freqhigh - freqlow; CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean)); for (uint i = 0; i < batch; i++) h_lean[i] = CTFParamsLean(h_params[i], toInt3(sidelength, sidelength, 1)); CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean)); dim3 TpB = dim3(192); dim3 grid = dim3(tmin(32, (inputlength + TpB.x - 1) / TpB.x), batch); tfloat* d_tempbins, *d_tempweights; hipMalloc((void**)&d_tempbins, numbins * grid.x * grid.y * sizeof(tfloat)); hipMalloc((void**)&d_tempweights, numbins * grid.x * grid.y * sizeof(tfloat)); if (numbins <= 513) CTFRotationalAverageKernel<513> << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean); else if (numbins <= 1025) CTFRotationalAverageKernel<1025> << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean); else if (numbins <= 2049) CTFRotationalAverageKernel<2049> << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, 
d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean); else if (numbins <= 4097) CTFRotationalAverageKernel<4097> << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean); else throw; d_ReduceMeanWeighted(d_tempbins, d_tempweights, d_average, numbins, grid.x, batch); //hipMemcpy(d_average, d_tempbins, numbins * batch * sizeof(tfloat), hipMemcpyDeviceToDevice); hipFree(d_tempweights); hipFree(d_tempbins); hipFree(d_lean); free(h_lean); } template<class T> void d_CTFRotationalAverageToTarget(T* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, CTFParams targetparams, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch) { uint numbins = freqhigh - freqlow; CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean)); for (uint i = 0; i < batch; i++) h_lean[i] = CTFParamsLean(h_params[i], toInt3(sidelength, sidelength, 1)); CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean)); CTFParamsLean targetparamslean = CTFParamsLean(targetparams, toInt3(sidelength, sidelength, 1)); dim3 TpB = dim3(192); dim3 grid = dim3(tmin(32, (inputlength + TpB.x - 1) / TpB.x), batch); tfloat* d_tempbins, *d_tempweights; hipMalloc((void**)&d_tempbins, numbins * grid.x * grid.y * sizeof(tfloat)); hipMalloc((void**)&d_tempweights, numbins * grid.x * grid.y * sizeof(tfloat)); tfloat2 ny = tfloat2(1.0 / (sidelength * sidelength), 1.0 / pow(sidelength, 4.0)); tfloat2 cs = tfloat2(h_lean[0].Cs, h_lean[0].Cs * h_lean[0].Cs); tfloat2 targetpx = tfloat2(pow(targetparamslean.pixelsize, 2.0), pow(targetparamslean.pixelsize, 4.0)); tfloat2 targetz = tfloat2(targetparamslean.defocus, targetparamslean.defocus * targetparamslean.defocus); tfloat2 lambda = tfloat2(pow(targetparamslean.lambda, 2.0), pow(targetparamslean.lambda, 4.0)); if (numbins <= 1024) CTFRotationalAverageToTargetKernel << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean, ny, cs, targetpx, targetz, lambda); else throw; d_ReduceMeanWeighted(d_tempbins, d_tempweights, d_average, numbins, grid.x * batch, 1); hipFree(d_tempweights); hipFree(d_tempbins); hipFree(d_lean); free(h_lean); } template void d_CTFRotationalAverageToTarget<tfloat>(tfloat* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, CTFParams targetparams, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch); template void d_CTFRotationalAverageToTarget<tcomplex>(tcomplex* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, CTFParams targetparams, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch); void d_CTFRotationalAverageToTargetDeterministic(tfloat* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, CTFParams targetparams, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch) { uint numbins = freqhigh - freqlow; CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean)); for (uint i = 0; i < batch; i++) h_lean[i] = CTFParamsLean(h_params[i], toInt3(sidelength, sidelength, 1)); CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean)); CTFParamsLean targetparamslean = CTFParamsLean(targetparams, toInt3(sidelength, sidelength, 1)); dim3 TpB = dim3(128); dim3 grid = dim3(tmin(64, (inputlength + TpB.x - 1) / TpB.x), 
1); tfloat* d_tempbins, *d_tempweights; hipMalloc((void**)&d_tempbins, numbins * grid.x * batch * sizeof(tfloat)); hipMalloc((void**)&d_tempweights, numbins * grid.x * batch * sizeof(tfloat)); tfloat2 ny = tfloat2(1.0 / (sidelength * sidelength), 1.0 / pow(sidelength, 4.0)); tfloat2 cs = tfloat2(h_lean[0].Cs, h_lean[0].Cs * h_lean[0].Cs); tfloat2 targetpx = tfloat2(pow(targetparamslean.pixelsize, 2.0), pow(targetparamslean.pixelsize, 4.0)); tfloat2 targetz = tfloat2(targetparamslean.defocus, targetparamslean.defocus * targetparamslean.defocus); tfloat2 lambda = tfloat2(pow(targetparamslean.lambda, 2.0), pow(targetparamslean.lambda, 4.0)); for (int b = 0; b < batch; b++) if (numbins <= 512) CTFRotationalAverageToTargetDeterministicKernel<512> << <grid, TpB >> > (d_input + inputlength * b, d_inputcoords, d_tempbins + numbins * grid.x * b, d_tempweights + numbins * grid.x * b, inputlength, numbins, freqlow, d_lean + b, ny, cs, targetpx, targetz, lambda); else if (numbins <= 1024) CTFRotationalAverageToTargetDeterministicKernel<1024> << <grid, TpB >> > (d_input + inputlength * b, d_inputcoords, d_tempbins + numbins * grid.x * b, d_tempweights + numbins * grid.x * b, inputlength, numbins, freqlow, d_lean + b, ny, cs, targetpx, targetz, lambda); else if (numbins <= 2048) CTFRotationalAverageToTargetDeterministicKernel<2048> << <grid, TpB >> > (d_input + inputlength * b, d_inputcoords, d_tempbins + numbins * grid.x * b, d_tempweights + numbins * grid.x * b, inputlength, numbins, freqlow, d_lean + b, ny, cs, targetpx, targetz, lambda); else if (numbins <= 4096) CTFRotationalAverageToTargetDeterministicKernel<4096> << <grid, TpB >> > (d_input + inputlength * b, d_inputcoords, d_tempbins + numbins * grid.x * b, d_tempweights + numbins * grid.x * b, inputlength, numbins, freqlow, d_lean + b, ny, cs, targetpx, targetz, lambda); else throw; /*if (h_consider != NULL) { std::vector<int> positions; for (int i = 0; i < batch; i++) if (h_consider[i] > 0) positions.push_back(i); tfloat* d_densebins; hipMalloc((void**)&d_densebins, numbins * grid.x * positions.size() * sizeof(tfloat)); tfloat* d_denseweights; hipMalloc((void**)&d_denseweights, numbins * grid.x * positions.size() * sizeof(tfloat)); for (int i = 0; i < positions.size(); i++) { hipMemcpy(d_densebins + numbins * grid.x * i, d_tempbins + numbins * grid.x * positions[i], numbins * grid.x * sizeof(tfloat), hipMemcpyDeviceToDevice); hipMemcpy(d_denseweights + numbins * grid.x * i, d_tempweights + numbins * grid.x * positions[i], numbins * grid.x * sizeof(tfloat), hipMemcpyDeviceToDevice); } d_ReduceMeanWeighted(d_densebins, d_denseweights, d_average, numbins, grid.x * positions.size(), 1); hipFree(d_densebins); hipFree(d_denseweights); } else*/ { d_ReduceMeanWeighted(d_tempbins, d_tempweights, d_average, numbins, grid.x * batch, 1); //hipMemcpy(d_average, d_tempbins, numbins * batch * sizeof(tfloat), hipMemcpyDeviceToDevice); } hipFree(d_tempweights); hipFree(d_tempbins); hipFree(d_lean); free(h_lean); } //////////////// //CUDA kernels// //////////////// __device__ tfloat d_CTFRescale(tfloat srcx, tfloat ny2, tfloat ny4, tfloat cs, tfloat cs2, tfloat srcpx, tfloat trgtpx2, tfloat trgtpx4, tfloat srcz, tfloat targetz, tfloat targetz2, tfloat lambda2, tfloat lambda4) { tfloat srcx2 = srcx * srcx; tfloat srcx4 = srcx2 * srcx2; tfloat srcpx2 = srcpx * srcpx; tfloat srcpx4 = srcpx2 * srcpx2; double summand1 = (double)cs2 * lambda4 * ny4 * srcx4; double summand2 = 2.0 * cs * lambda2 * ny2 * srcpx2 * srcx2 * srcz; double summand3 = (double)srcpx4 * 
targetz2; double firstroot = -sqrt(trgtpx4 * srcpx4 * (summand1 + summand2 + summand3)); double numerator = firstroot + trgtpx2 * srcpx4 * abs(targetz); double denominator = (double)cs * lambda2 * ny2 * srcpx4; tfloat x = (tfloat)sqrt(abs(numerator / denominator)); return x; } template<int maxbins> __global__ void CTFRotationalAverageKernel(tfloat* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params) { __shared__ tfloat s_bins[maxbins], s_weights[maxbins]; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0; s_weights[i] = 0; } __syncthreads(); CTFParamsLean p = d_params[blockIdx.y]; d_input += blockIdx.y * inputlength; d_average += blockIdx.y * gridDim.x * numbins; d_averageweights += blockIdx.y * gridDim.x * numbins; double cs2 = p.Cs * p.Cs; double defocus2 = p.defocus * p.defocus; double lambda2 = p.lambda * p.lambda; double lambda4 = lambda2 * lambda2; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < inputlength; id += gridDim.x * blockDim.x) { float radius = d_inputcoords[id].x; float angle = d_inputcoords[id].y; radius *= p.ny; double radius2 = radius * radius; double radius4 = radius2 * radius2; double astdefocus = p.defocus - p.defocusdelta * cos(2.0f * (angle + (float)p.astigmatismangle)); double originalradius = sqrt(abs(abs(p.defocus) - sqrt(cs2 * radius4 * lambda4 + 2.0 * p.Cs * astdefocus * radius2 * lambda2 + defocus2)) / (p.Cs * lambda2)); originalradius /= p.ny * 2.0 / (double)sidelength; tfloat val = d_input[id]; short lowbin = floor(originalradius), highbin = lowbin + 1; tfloat lowweight = (tfloat)(1 + lowbin) - originalradius, highweight = (tfloat)1 - lowweight; if (lowbin >= freqlow && lowbin < freqhigh) { lowbin -= freqlow; atomicAdd(s_bins + lowbin, val * lowweight); atomicAdd(s_weights + lowbin, lowweight); } if (highbin >= freqlow && highbin < freqhigh) { highbin -= freqlow; atomicAdd(s_bins + highbin, val * highweight); atomicAdd(s_weights + highbin, highweight); } } __syncthreads(); d_average += blockIdx.x * numbins; d_averageweights += blockIdx.x * numbins; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { d_average[i] = s_weights[i] != 0 ? 
s_bins[i] / s_weights[i] : 0; d_averageweights[i] = s_weights[i]; } } template<class T> __global__ void CTFRotationalAverageToTargetKernel(T* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda) { __shared__ tfloat s_bins[1024], s_weights[1024]; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0; s_weights[i] = 0; } __syncthreads(); CTFParamsLean p = d_params[blockIdx.y]; d_input += blockIdx.y * inputlength; d_average += blockIdx.y * gridDim.x * numbins; d_averageweights += blockIdx.y * gridDim.x * numbins; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < inputlength; id += gridDim.x * blockDim.x) { float sourcex = d_inputcoords[id].x; float angle = d_inputcoords[id].y; tfloat sourcepx = p.pixelsize + p.pixeldelta * cos(2.0f * (angle - (float)p.pixelangle)); tfloat sourcez = p.defocus + p.defocusdelta * cos(2.0f * (angle - (float)p.astigmatismangle)); tfloat targetx = d_CTFRescale(sourcex, ny.x, ny.y, cs.x, cs.y, sourcepx, targetpx.x, targetpx.y, sourcez, targetz.x, targetz.y, lambda.x, lambda.y); tfloat val = d_input[id]; short lowbin = floor(targetx), highbin = lowbin + 1; tfloat lowweight = (tfloat)(1 + lowbin) - targetx, highweight = (tfloat)1 - lowweight; lowweight *= p.scale; highweight *= p.scale; if (lowbin >= freqlow && lowbin < freqhigh) { lowbin -= freqlow; atomicAdd(s_bins + lowbin, val * lowweight); atomicAdd(s_weights + lowbin, lowweight); } if (highbin >= freqlow && highbin < freqhigh) { highbin -= freqlow; atomicAdd(s_bins + highbin, val * highweight); atomicAdd(s_weights + highbin, highweight); } } __syncthreads(); d_average += blockIdx.x * numbins; d_averageweights += blockIdx.x * numbins; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { d_average[i] = s_weights[i] != 0 ? 
s_bins[i] / s_weights[i] : 0; d_averageweights[i] = s_weights[i]; } } template<> __global__ void CTFRotationalAverageToTargetKernel<tcomplex>(tcomplex* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda) { __shared__ tfloat s_bins[1024], s_weights[1024]; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0; s_weights[i] = 0; } __syncthreads(); CTFParamsLean p = d_params[blockIdx.y]; d_input += blockIdx.y * inputlength; d_average += blockIdx.y * gridDim.x * numbins; d_averageweights += blockIdx.y * gridDim.x * numbins; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < inputlength; id += gridDim.x * blockDim.x) { float sourcex = d_inputcoords[id].x; float angle = d_inputcoords[id].y; tfloat sourcepx = p.pixelsize + p.pixeldelta * cos(2.0f * (angle - (float)p.pixelangle)); tfloat sourcez = p.defocus + p.defocusdelta * cos(2.0f * (angle - (float)p.astigmatismangle)); tfloat targetx = d_CTFRescale(sourcex, ny.x, ny.y, cs.x, cs.y, sourcepx, targetpx.x, targetpx.y, sourcez, targetz.x, targetz.y, lambda.x, lambda.y); tcomplex valc = d_input[id]; tfloat val = sqrt(valc.x * valc.x + valc.y * valc.y); short lowbin = floor(targetx), highbin = lowbin + 1; tfloat lowweight = (tfloat)(1 + lowbin) - targetx, highweight = (tfloat)1 - lowweight; if (lowbin >= freqlow && lowbin < freqhigh) { lowbin -= freqlow; atomicAdd(s_bins + lowbin, val * lowweight); atomicAdd(s_weights + lowbin, lowweight); } if (highbin >= freqlow && highbin < freqhigh) { highbin -= freqlow; atomicAdd(s_bins + highbin, val * highweight); atomicAdd(s_weights + highbin, highweight); } } __syncthreads(); d_average += blockIdx.x * numbins; d_averageweights += blockIdx.x * numbins; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { d_average[i] = s_weights[i] != 0 ? 
s_bins[i] / s_weights[i] : 0; d_averageweights[i] = s_weights[i]; } } template<int maxbins> __global__ void CTFRotationalAverageToTargetDeterministicKernel(tfloat* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, ushort numbins, ushort freqlow, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda) { __shared__ tfloat s_bins[maxbins], s_weights[maxbins]; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0; s_weights[i] = 0; } __syncthreads(); CTFParamsLean p = d_params[blockIdx.y]; d_input += blockIdx.y * inputlength; d_average += (blockIdx.y * gridDim.x + blockIdx.x) * numbins; d_averageweights += (blockIdx.y * gridDim.x + blockIdx.x) * numbins; for (uint id = blockIdx.x; id < inputlength; id += gridDim.x) { float sourcex = d_inputcoords[id].x; float angle = d_inputcoords[id].y; tfloat sourcepx = p.pixelsize + p.pixeldelta * cos(2.0f * (angle - (float)p.pixelangle)); tfloat sourcez = p.defocus + p.defocusdelta * cos(2.0f * (angle - (float)p.astigmatismangle)); tfloat targetx = d_CTFRescale(sourcex, ny.x, ny.y, cs.x, cs.y, sourcepx, targetpx.x, targetpx.y, sourcez, targetz.x, targetz.y, lambda.x, lambda.y); tfloat val = d_input[id]; for (ushort bin = threadIdx.x; bin < numbins; bin += blockDim.x) { float dist = abs(targetx - (float)(freqlow + bin)); if (dist < 4.0f) { float weight = sinc(dist); s_bins[bin] += val * weight; s_weights[bin] += weight; } } } __syncthreads(); for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { d_average[i] = s_weights[i] != 0 ? s_bins[i] / s_weights[i] : 0; d_averageweights[i] = s_weights[i]; } } }
3ef0a028599df7aedac5cdee655741b0c321af42.cu
#include "Prerequisites.cuh" #include "CTF.cuh" #include "Generics.cuh" #include "Helper.cuh" #include "DeviceFunctions.cuh" namespace gtom { template<int maxbins> __global__ void CTFRotationalAverageKernel(tfloat* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params); template<class T> __global__ void CTFRotationalAverageToTargetKernel(T* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda); template<int maxbins> __global__ void CTFRotationalAverageToTargetDeterministicKernel(tfloat* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, ushort numbins, ushort freqlow, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda); //////////////////////////////////////////////////////////// //Correct the CTF function to make all amplitudes positive// //////////////////////////////////////////////////////////// void d_CTFRotationalAverage(tfloat* d_re, int2 dimsinput, CTFParams* h_params, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch) { float2* h_targetcoords = (float2*)malloc(ElementsFFT2(dimsinput) * sizeof(float2)); float invhalfsize = 1.0f / (float)dimsinput.x; float center = dimsinput.x / 2; for (int y = 0; y < dimsinput.y; y++) { for (int x = 0; x < ElementsFFT1(dimsinput.x); x++) { float2 point = make_float2(x - center, y - center); float angle = atan2(point.y, point.x); h_targetcoords[y * ElementsFFT1(dimsinput.x) + x] = make_float2(sqrt(point.x * point.x + point.y * point.y) * invhalfsize, angle); } } float2* d_targetcoords = (float2*)CudaMallocFromHostArray(h_targetcoords, ElementsFFT2(dimsinput) * sizeof(float2)); free(h_targetcoords); d_CTFRotationalAverage(d_re, d_targetcoords, ElementsFFT2(dimsinput), dimsinput.x, h_params, d_average, freqlow, freqhigh, batch); cudaFree(d_targetcoords); } void d_CTFRotationalAverage(tfloat* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch) { uint numbins = freqhigh - freqlow; CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean)); for (uint i = 0; i < batch; i++) h_lean[i] = CTFParamsLean(h_params[i], toInt3(sidelength, sidelength, 1)); CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean)); dim3 TpB = dim3(192); dim3 grid = dim3(tmin(32, (inputlength + TpB.x - 1) / TpB.x), batch); tfloat* d_tempbins, *d_tempweights; cudaMalloc((void**)&d_tempbins, numbins * grid.x * grid.y * sizeof(tfloat)); cudaMalloc((void**)&d_tempweights, numbins * grid.x * grid.y * sizeof(tfloat)); if (numbins <= 513) CTFRotationalAverageKernel<513> << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean); else if (numbins <= 1025) CTFRotationalAverageKernel<1025> << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean); else if (numbins <= 2049) CTFRotationalAverageKernel<2049> << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean); else if (numbins <= 
4097) CTFRotationalAverageKernel<4097> << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean); else throw; d_ReduceMeanWeighted(d_tempbins, d_tempweights, d_average, numbins, grid.x, batch); //cudaMemcpy(d_average, d_tempbins, numbins * batch * sizeof(tfloat), cudaMemcpyDeviceToDevice); cudaFree(d_tempweights); cudaFree(d_tempbins); cudaFree(d_lean); free(h_lean); } template<class T> void d_CTFRotationalAverageToTarget(T* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, CTFParams targetparams, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch) { uint numbins = freqhigh - freqlow; CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean)); for (uint i = 0; i < batch; i++) h_lean[i] = CTFParamsLean(h_params[i], toInt3(sidelength, sidelength, 1)); CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean)); CTFParamsLean targetparamslean = CTFParamsLean(targetparams, toInt3(sidelength, sidelength, 1)); dim3 TpB = dim3(192); dim3 grid = dim3(tmin(32, (inputlength + TpB.x - 1) / TpB.x), batch); tfloat* d_tempbins, *d_tempweights; cudaMalloc((void**)&d_tempbins, numbins * grid.x * grid.y * sizeof(tfloat)); cudaMalloc((void**)&d_tempweights, numbins * grid.x * grid.y * sizeof(tfloat)); tfloat2 ny = tfloat2(1.0 / (sidelength * sidelength), 1.0 / pow(sidelength, 4.0)); tfloat2 cs = tfloat2(h_lean[0].Cs, h_lean[0].Cs * h_lean[0].Cs); tfloat2 targetpx = tfloat2(pow(targetparamslean.pixelsize, 2.0), pow(targetparamslean.pixelsize, 4.0)); tfloat2 targetz = tfloat2(targetparamslean.defocus, targetparamslean.defocus * targetparamslean.defocus); tfloat2 lambda = tfloat2(pow(targetparamslean.lambda, 2.0), pow(targetparamslean.lambda, 4.0)); if (numbins <= 1024) CTFRotationalAverageToTargetKernel << <grid, TpB >> > (d_input, d_inputcoords, d_tempbins, d_tempweights, inputlength, sidelength, numbins, freqlow, freqhigh, d_lean, ny, cs, targetpx, targetz, lambda); else throw; d_ReduceMeanWeighted(d_tempbins, d_tempweights, d_average, numbins, grid.x * batch, 1); cudaFree(d_tempweights); cudaFree(d_tempbins); cudaFree(d_lean); free(h_lean); } template void d_CTFRotationalAverageToTarget<tfloat>(tfloat* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, CTFParams targetparams, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch); template void d_CTFRotationalAverageToTarget<tcomplex>(tcomplex* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, CTFParams targetparams, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch); void d_CTFRotationalAverageToTargetDeterministic(tfloat* d_input, float2* d_inputcoords, uint inputlength, uint sidelength, CTFParams* h_params, CTFParams targetparams, tfloat* d_average, ushort freqlow, ushort freqhigh, int batch) { uint numbins = freqhigh - freqlow; CTFParamsLean* h_lean = (CTFParamsLean*)malloc(batch * sizeof(CTFParamsLean)); for (uint i = 0; i < batch; i++) h_lean[i] = CTFParamsLean(h_params[i], toInt3(sidelength, sidelength, 1)); CTFParamsLean* d_lean = (CTFParamsLean*)CudaMallocFromHostArray(h_lean, batch * sizeof(CTFParamsLean)); CTFParamsLean targetparamslean = CTFParamsLean(targetparams, toInt3(sidelength, sidelength, 1)); dim3 TpB = dim3(128); dim3 grid = dim3(tmin(64, (inputlength + TpB.x - 1) / TpB.x), 1); tfloat* d_tempbins, *d_tempweights; cudaMalloc((void**)&d_tempbins, numbins * grid.x 
* batch * sizeof(tfloat)); cudaMalloc((void**)&d_tempweights, numbins * grid.x * batch * sizeof(tfloat)); tfloat2 ny = tfloat2(1.0 / (sidelength * sidelength), 1.0 / pow(sidelength, 4.0)); tfloat2 cs = tfloat2(h_lean[0].Cs, h_lean[0].Cs * h_lean[0].Cs); tfloat2 targetpx = tfloat2(pow(targetparamslean.pixelsize, 2.0), pow(targetparamslean.pixelsize, 4.0)); tfloat2 targetz = tfloat2(targetparamslean.defocus, targetparamslean.defocus * targetparamslean.defocus); tfloat2 lambda = tfloat2(pow(targetparamslean.lambda, 2.0), pow(targetparamslean.lambda, 4.0)); for (int b = 0; b < batch; b++) if (numbins <= 512) CTFRotationalAverageToTargetDeterministicKernel<512> << <grid, TpB >> > (d_input + inputlength * b, d_inputcoords, d_tempbins + numbins * grid.x * b, d_tempweights + numbins * grid.x * b, inputlength, numbins, freqlow, d_lean + b, ny, cs, targetpx, targetz, lambda); else if (numbins <= 1024) CTFRotationalAverageToTargetDeterministicKernel<1024> << <grid, TpB >> > (d_input + inputlength * b, d_inputcoords, d_tempbins + numbins * grid.x * b, d_tempweights + numbins * grid.x * b, inputlength, numbins, freqlow, d_lean + b, ny, cs, targetpx, targetz, lambda); else if (numbins <= 2048) CTFRotationalAverageToTargetDeterministicKernel<2048> << <grid, TpB >> > (d_input + inputlength * b, d_inputcoords, d_tempbins + numbins * grid.x * b, d_tempweights + numbins * grid.x * b, inputlength, numbins, freqlow, d_lean + b, ny, cs, targetpx, targetz, lambda); else if (numbins <= 4096) CTFRotationalAverageToTargetDeterministicKernel<4096> << <grid, TpB >> > (d_input + inputlength * b, d_inputcoords, d_tempbins + numbins * grid.x * b, d_tempweights + numbins * grid.x * b, inputlength, numbins, freqlow, d_lean + b, ny, cs, targetpx, targetz, lambda); else throw; /*if (h_consider != NULL) { std::vector<int> positions; for (int i = 0; i < batch; i++) if (h_consider[i] > 0) positions.push_back(i); tfloat* d_densebins; cudaMalloc((void**)&d_densebins, numbins * grid.x * positions.size() * sizeof(tfloat)); tfloat* d_denseweights; cudaMalloc((void**)&d_denseweights, numbins * grid.x * positions.size() * sizeof(tfloat)); for (int i = 0; i < positions.size(); i++) { cudaMemcpy(d_densebins + numbins * grid.x * i, d_tempbins + numbins * grid.x * positions[i], numbins * grid.x * sizeof(tfloat), cudaMemcpyDeviceToDevice); cudaMemcpy(d_denseweights + numbins * grid.x * i, d_tempweights + numbins * grid.x * positions[i], numbins * grid.x * sizeof(tfloat), cudaMemcpyDeviceToDevice); } d_ReduceMeanWeighted(d_densebins, d_denseweights, d_average, numbins, grid.x * positions.size(), 1); cudaFree(d_densebins); cudaFree(d_denseweights); } else*/ { d_ReduceMeanWeighted(d_tempbins, d_tempweights, d_average, numbins, grid.x * batch, 1); //cudaMemcpy(d_average, d_tempbins, numbins * batch * sizeof(tfloat), cudaMemcpyDeviceToDevice); } cudaFree(d_tempweights); cudaFree(d_tempbins); cudaFree(d_lean); free(h_lean); } //////////////// //CUDA kernels// //////////////// __device__ tfloat d_CTFRescale(tfloat srcx, tfloat ny2, tfloat ny4, tfloat cs, tfloat cs2, tfloat srcpx, tfloat trgtpx2, tfloat trgtpx4, tfloat srcz, tfloat targetz, tfloat targetz2, tfloat lambda2, tfloat lambda4) { tfloat srcx2 = srcx * srcx; tfloat srcx4 = srcx2 * srcx2; tfloat srcpx2 = srcpx * srcpx; tfloat srcpx4 = srcpx2 * srcpx2; double summand1 = (double)cs2 * lambda4 * ny4 * srcx4; double summand2 = 2.0 * cs * lambda2 * ny2 * srcpx2 * srcx2 * srcz; double summand3 = (double)srcpx4 * targetz2; double firstroot = -sqrt(trgtpx4 * srcpx4 * (summand1 + summand2 + 
summand3)); double numerator = firstroot + trgtpx2 * srcpx4 * abs(targetz); double denominator = (double)cs * lambda2 * ny2 * srcpx4; tfloat x = (tfloat)sqrt(abs(numerator / denominator)); return x; } template<int maxbins> __global__ void CTFRotationalAverageKernel(tfloat* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params) { __shared__ tfloat s_bins[maxbins], s_weights[maxbins]; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0; s_weights[i] = 0; } __syncthreads(); CTFParamsLean p = d_params[blockIdx.y]; d_input += blockIdx.y * inputlength; d_average += blockIdx.y * gridDim.x * numbins; d_averageweights += blockIdx.y * gridDim.x * numbins; double cs2 = p.Cs * p.Cs; double defocus2 = p.defocus * p.defocus; double lambda2 = p.lambda * p.lambda; double lambda4 = lambda2 * lambda2; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < inputlength; id += gridDim.x * blockDim.x) { float radius = d_inputcoords[id].x; float angle = d_inputcoords[id].y; radius *= p.ny; double radius2 = radius * radius; double radius4 = radius2 * radius2; double astdefocus = p.defocus - p.defocusdelta * cos(2.0f * (angle + (float)p.astigmatismangle)); double originalradius = sqrt(abs(abs(p.defocus) - sqrt(cs2 * radius4 * lambda4 + 2.0 * p.Cs * astdefocus * radius2 * lambda2 + defocus2)) / (p.Cs * lambda2)); originalradius /= p.ny * 2.0 / (double)sidelength; tfloat val = d_input[id]; short lowbin = floor(originalradius), highbin = lowbin + 1; tfloat lowweight = (tfloat)(1 + lowbin) - originalradius, highweight = (tfloat)1 - lowweight; if (lowbin >= freqlow && lowbin < freqhigh) { lowbin -= freqlow; atomicAdd(s_bins + lowbin, val * lowweight); atomicAdd(s_weights + lowbin, lowweight); } if (highbin >= freqlow && highbin < freqhigh) { highbin -= freqlow; atomicAdd(s_bins + highbin, val * highweight); atomicAdd(s_weights + highbin, highweight); } } __syncthreads(); d_average += blockIdx.x * numbins; d_averageweights += blockIdx.x * numbins; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { d_average[i] = s_weights[i] != 0 ? 
s_bins[i] / s_weights[i] : 0; d_averageweights[i] = s_weights[i]; } } template<class T> __global__ void CTFRotationalAverageToTargetKernel(T* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda) { __shared__ tfloat s_bins[1024], s_weights[1024]; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0; s_weights[i] = 0; } __syncthreads(); CTFParamsLean p = d_params[blockIdx.y]; d_input += blockIdx.y * inputlength; d_average += blockIdx.y * gridDim.x * numbins; d_averageweights += blockIdx.y * gridDim.x * numbins; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < inputlength; id += gridDim.x * blockDim.x) { float sourcex = d_inputcoords[id].x; float angle = d_inputcoords[id].y; tfloat sourcepx = p.pixelsize + p.pixeldelta * cos(2.0f * (angle - (float)p.pixelangle)); tfloat sourcez = p.defocus + p.defocusdelta * cos(2.0f * (angle - (float)p.astigmatismangle)); tfloat targetx = d_CTFRescale(sourcex, ny.x, ny.y, cs.x, cs.y, sourcepx, targetpx.x, targetpx.y, sourcez, targetz.x, targetz.y, lambda.x, lambda.y); tfloat val = d_input[id]; short lowbin = floor(targetx), highbin = lowbin + 1; tfloat lowweight = (tfloat)(1 + lowbin) - targetx, highweight = (tfloat)1 - lowweight; lowweight *= p.scale; highweight *= p.scale; if (lowbin >= freqlow && lowbin < freqhigh) { lowbin -= freqlow; atomicAdd(s_bins + lowbin, val * lowweight); atomicAdd(s_weights + lowbin, lowweight); } if (highbin >= freqlow && highbin < freqhigh) { highbin -= freqlow; atomicAdd(s_bins + highbin, val * highweight); atomicAdd(s_weights + highbin, highweight); } } __syncthreads(); d_average += blockIdx.x * numbins; d_averageweights += blockIdx.x * numbins; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { d_average[i] = s_weights[i] != 0 ? 
s_bins[i] / s_weights[i] : 0; d_averageweights[i] = s_weights[i]; } } template<> __global__ void CTFRotationalAverageToTargetKernel<tcomplex>(tcomplex* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, uint sidelength, ushort numbins, ushort freqlow, ushort freqhigh, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda) { __shared__ tfloat s_bins[1024], s_weights[1024]; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0; s_weights[i] = 0; } __syncthreads(); CTFParamsLean p = d_params[blockIdx.y]; d_input += blockIdx.y * inputlength; d_average += blockIdx.y * gridDim.x * numbins; d_averageweights += blockIdx.y * gridDim.x * numbins; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < inputlength; id += gridDim.x * blockDim.x) { float sourcex = d_inputcoords[id].x; float angle = d_inputcoords[id].y; tfloat sourcepx = p.pixelsize + p.pixeldelta * cos(2.0f * (angle - (float)p.pixelangle)); tfloat sourcez = p.defocus + p.defocusdelta * cos(2.0f * (angle - (float)p.astigmatismangle)); tfloat targetx = d_CTFRescale(sourcex, ny.x, ny.y, cs.x, cs.y, sourcepx, targetpx.x, targetpx.y, sourcez, targetz.x, targetz.y, lambda.x, lambda.y); tcomplex valc = d_input[id]; tfloat val = sqrt(valc.x * valc.x + valc.y * valc.y); short lowbin = floor(targetx), highbin = lowbin + 1; tfloat lowweight = (tfloat)(1 + lowbin) - targetx, highweight = (tfloat)1 - lowweight; if (lowbin >= freqlow && lowbin < freqhigh) { lowbin -= freqlow; atomicAdd(s_bins + lowbin, val * lowweight); atomicAdd(s_weights + lowbin, lowweight); } if (highbin >= freqlow && highbin < freqhigh) { highbin -= freqlow; atomicAdd(s_bins + highbin, val * highweight); atomicAdd(s_weights + highbin, highweight); } } __syncthreads(); d_average += blockIdx.x * numbins; d_averageweights += blockIdx.x * numbins; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { d_average[i] = s_weights[i] != 0 ? 
s_bins[i] / s_weights[i] : 0; d_averageweights[i] = s_weights[i]; } } template<int maxbins> __global__ void CTFRotationalAverageToTargetDeterministicKernel(tfloat* d_input, float2* d_inputcoords, tfloat* d_average, tfloat* d_averageweights, uint inputlength, ushort numbins, ushort freqlow, CTFParamsLean* d_params, tfloat2 ny, tfloat2 cs, tfloat2 targetpx, tfloat2 targetz, tfloat2 lambda) { __shared__ tfloat s_bins[maxbins], s_weights[maxbins]; for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0; s_weights[i] = 0; } __syncthreads(); CTFParamsLean p = d_params[blockIdx.y]; d_input += blockIdx.y * inputlength; d_average += (blockIdx.y * gridDim.x + blockIdx.x) * numbins; d_averageweights += (blockIdx.y * gridDim.x + blockIdx.x) * numbins; for (uint id = blockIdx.x; id < inputlength; id += gridDim.x) { float sourcex = d_inputcoords[id].x; float angle = d_inputcoords[id].y; tfloat sourcepx = p.pixelsize + p.pixeldelta * cos(2.0f * (angle - (float)p.pixelangle)); tfloat sourcez = p.defocus + p.defocusdelta * cos(2.0f * (angle - (float)p.astigmatismangle)); tfloat targetx = d_CTFRescale(sourcex, ny.x, ny.y, cs.x, cs.y, sourcepx, targetpx.x, targetpx.y, sourcez, targetz.x, targetz.y, lambda.x, lambda.y); tfloat val = d_input[id]; for (ushort bin = threadIdx.x; bin < numbins; bin += blockDim.x) { float dist = abs(targetx - (float)(freqlow + bin)); if (dist < 4.0f) { float weight = sinc(dist); s_bins[bin] += val * weight; s_weights[bin] += weight; } } } __syncthreads(); for (ushort i = threadIdx.x; i < numbins; i += blockDim.x) { d_average[i] = s_weights[i] != 0 ? s_bins[i] / s_weights[i] : 0; d_averageweights[i] = s_weights[i]; } } }
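// ---------------------------------------------------------------------------
// Hedged sketch (added for illustration; not part of the original file). The
// rotational-average kernels above share one pattern: each block keeps a
// private histogram of bins and weights in shared memory, threads add linearly
// interpolated contributions with atomicAdd, and each block writes a partial
// (mean, weight) pair that d_ReduceMeanWeighted later combines. Below is a
// stripped-down version of that pattern with plain float instead of tfloat and
// without the CTF-specific coordinate mapping; it is an illustration, not the
// gtom implementation.
// ---------------------------------------------------------------------------
__global__ void radial_average_sketch(const float* values,     // one value per element
                                      const float* radii,      // bin coordinate per element
                                      float* partial_means,    // [gridDim.x * numbins]
                                      float* partial_weights,  // [gridDim.x * numbins]
                                      int n, int numbins)
{
    extern __shared__ float s[];                // 2 * numbins floats, passed at launch
    float* s_bins = s;
    float* s_weights = s + numbins;

    for (int i = threadIdx.x; i < numbins; i += blockDim.x) { s_bins[i] = 0.0f; s_weights[i] = 0.0f; }
    __syncthreads();

    for (int id = blockIdx.x * blockDim.x + threadIdx.x; id < n; id += gridDim.x * blockDim.x)
    {
        float r = radii[id];
        int lo = (int)floorf(r);
        float hiw = r - (float)lo;              // weight of the upper bin
        float low = 1.0f - hiw;                 // weight of the lower bin
        if (lo >= 0 && lo < numbins)         { atomicAdd(s_bins + lo,     values[id] * low); atomicAdd(s_weights + lo,     low); }
        if (lo + 1 >= 0 && lo + 1 < numbins) { atomicAdd(s_bins + lo + 1, values[id] * hiw); atomicAdd(s_weights + lo + 1, hiw); }
    }
    __syncthreads();

    for (int i = threadIdx.x; i < numbins; i += blockDim.x)
    {
        int out = blockIdx.x * numbins + i;
        partial_means[out]   = s_weights[i] != 0.0f ? s_bins[i] / s_weights[i] : 0.0f;
        partial_weights[out] = s_weights[i];
    }
}
// Launch sketch: radial_average_sketch<<<blocks, threads, 2 * numbins * sizeof(float)>>>(...)
// followed by a weighted mean over the `blocks` partial results of each bin.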
eb2a4dad4d4a0a18937901cfc58c4e4aa2246536.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "GpuTypes.h" #include "NNTypes.h" #include <limits> static __constant__ GpuData cData; #define REDUCE_ERROR() \ if (__any(error != (NNFloat)0.0)) \ { \ uint32_t tgx = threadIdx.x & cData._warpMask; \ error += __shfl(error, tgx ^ 1); \ error += __shfl(error, tgx ^ 2); \ error += __shfl(error, tgx ^ 4); \ error += __shfl(error, tgx ^ 8); \ error += __shfl(error, tgx ^ 16); \ if (tgx == 0) \ { \ atomicAdd(cData._pAccumulator, llitoulli(llrintf(ERRORSCALEF * error))); \ } \ } \ __device__ inline uint64_t llitoulli(int64_t l) { uint64_t u; asm("mov.b64 %0, %1;" : "=l"(u) : "l"(l)); return u; } __device__ inline int64_t ullitolli(uint64_t u) { int64_t l; asm("mov.b64 %0, %1;" : "=l"(l) : "l"(u)); return l; } void SetKLossGpuData() { hipError_t status; status = hipMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); RTERROR(status, "hipMemcpyToSymbol: SetKernelsGpuData copy to cData failed"); } void GetKLossGpuData() { hipError_t status; status = hipMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); RTERROR(status, "hipMemcpyToSymbol: SetKernelsGpuData copy From cData failed"); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL1Error_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; error = fabsf(a); } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += fabsf(a - (NNFloat)1.0) - fabsf(a); pos1 += cData._warpSize; } } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += fabsf(a - (NNFloat)1.0); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pUnit, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += fabsf(a - t); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += fabsf(a - t) - fabsf(a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += fabsf(a - t); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += fabsf(a - t) - fabsf(a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += fabsf(a - t); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += fabsf(a - t) - fabsf(a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> NNFloat kCalculateSparseAnalogL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogOnlyNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pUnit, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL2Error_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; error = (NNFloat)0.5 * a * a; } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += (NNFloat)0.5 * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0)); pos1 += cData._warpSize; } } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += (NNFloat)0.5 * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0) - a * a); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pUnit, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += (NNFloat)0.5 * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += (NNFloat)0.5 * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += (NNFloat)0.5 * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += (NNFloat)0.5 * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += (NNFloat)0.5 * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += (NNFloat)0.5 * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> NNFloat kCalculateSparseAnalogL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogOnlyNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pUnit, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawCrossEntropyError_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; error = -log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -log(max(MIN_ERROR, a)) + log(max(MIN_ERROR, (NNFloat)1.0 - a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseOnlyNonZeroCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pUnit, size); LAUNCHERROR("kCalculateSparseRawCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseMultinomialCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += -t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += -t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += -t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> NNFloat kCalculateSparseAnalogMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogMultinomialCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawScaledMarginalCrossEntropyError_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; if (a > cData._SMCE_zeroTarget) error = -cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a > cData._SMCE_zeroTarget) error += cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pUnit, size); LAUNCHERROR("kCalculateSparseRawScaledMarginalCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat t = (NNFloat)1.0f / (NNFloat)(end - pos1); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 256.0); if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 128.0); if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> NNFloat kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = fabsf(a - t); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = fabsf(a - t); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = fabsf(a - t); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateL1Error_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateL1Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCE_ERROR() } template<> __global__ void kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCE_ERROR() } template<> __global__ void kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateL2Error_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateL2Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateMultinomialCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if (((t == (T)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (T)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateScaledMarginalCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if ((t != (T)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateMultinomialScaledMarginalCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } // Instantiates allowable templated functions so we can hide the implementations here // instead of in the header file because we're mixing CUDA and C++ and that's // a migraine headache in the making otherwise. void kLossTempFunction() { kCalculateL1Error<NNFloat>(0, 0, 0, NULL, NULL); kCalculateL1Error<double>(0, 0, 0, NULL, NULL); kCalculateL1Error<unsigned char>(0, 0, 0, NULL, NULL); kCalculateL1Error<char>(0, 0, 0, NULL, NULL); kCalculateL1Error<uint32_t>(0, 0, 0, NULL, NULL); kCalculateL1Error<uint64_t>(0, 0, 0, NULL, NULL); kCalculateL1Error<int32_t>(0, 0, 0, NULL, NULL); kCalculateL1Error<int64_t>(0, 0, 0, NULL, NULL); kCalculateL2Error<NNFloat>(0, 0, 0, NULL, NULL); kCalculateL2Error<double>(0, 0, 0, NULL, NULL); kCalculateL2Error<unsigned char>(0, 0, 0, NULL, NULL); kCalculateL2Error<char>(0, 0, 0, NULL, NULL); kCalculateL2Error<uint32_t>(0, 0, 0, NULL, NULL); kCalculateL2Error<uint64_t>(0, 0, 0, NULL, NULL); kCalculateL2Error<int32_t>(0, 0, 0, NULL, NULL); kCalculateL2Error<int64_t>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<double>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<char>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<double>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<char>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<double>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<char>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL); 
kCalculateMultinomialCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<double>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<char>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL); kCalculateSparseAnalogL1Error<NNFloat>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<double>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<unsigned char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<uint32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<uint64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<int32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<int64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<NNFloat>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<double>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<unsigned char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<uint32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<uint64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<int32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<int64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogMultinomialCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<double>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<double>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<uint32_t>(0, 0, 0, NULL, 
NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); }
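// ---------------------------------------------------------------------------
// Illustration only (not part of the original kernel sources): kLossTempFunction
// works because referencing or explicitly instantiating a template from this
// .cu file forces the compiler to emit object code for every listed type, so
// the header can expose plain declarations and host-only C++ translation units
// never have to compile the template bodies. The tiny template below
// (SumAsDouble, a hypothetical name) is a minimal sketch of that pattern under
// those assumptions; it is not referenced anywhere else in this file.
namespace kloss_instantiation_sketch {

// Header side would carry only the declaration:
//   template<typename T> double SumAsDouble(const T* pData, unsigned int n);
template <typename T>
double SumAsDouble(const T* pData, unsigned int n)
{
    // Accumulate in double regardless of the storage type T.
    double sum = 0.0;
    for (unsigned int i = 0; i < n; ++i)
        sum += static_cast<double>(pData[i]);
    return sum;
}

// Explicit instantiation definitions: each line forces code generation for one
// T, which is what the dummy calls inside kLossTempFunction achieve implicitly.
template double SumAsDouble<float>(const float*, unsigned int);
template double SumAsDouble<unsigned char>(const unsigned char*, unsigned int);
template double SumAsDouble<char>(const char*, unsigned int);

} // namespace kloss_instantiation_sketch
// ---------------------------------------------------------------------------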
eb2a4dad4d4a0a18937901cfc58c4e4aa2246536.cu
/* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "GpuTypes.h" #include "NNTypes.h" #include <limits> static __constant__ GpuData cData; #define REDUCE_ERROR() \ if (__any(error != (NNFloat)0.0)) \ { \ uint32_t tgx = threadIdx.x & cData._warpMask; \ error += __shfl(error, tgx ^ 1); \ error += __shfl(error, tgx ^ 2); \ error += __shfl(error, tgx ^ 4); \ error += __shfl(error, tgx ^ 8); \ error += __shfl(error, tgx ^ 16); \ if (tgx == 0) \ { \ atomicAdd(cData._pAccumulator, llitoulli(llrintf(ERRORSCALEF * error))); \ } \ } \ __device__ inline uint64_t llitoulli(int64_t l) { uint64_t u; asm("mov.b64 %0, %1;" : "=l"(u) : "l"(l)); return u; } __device__ inline int64_t ullitolli(uint64_t u) { int64_t l; asm("mov.b64 %0, %1;" : "=l"(l) : "l"(u)); return l; } void SetKLossGpuData() { cudaError_t status; status = cudaMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); RTERROR(status, "cudaMemcpyToSymbol: SetKernelsGpuData copy to cData failed"); } void GetKLossGpuData() { cudaError_t status; status = cudaMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); RTERROR(status, "cudaMemcpyToSymbol: SetKernelsGpuData copy From cData failed"); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL1Error_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; error = fabsf(a); } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += fabsf(a - (NNFloat)1.0) - fabsf(a); pos1 += cData._warpSize; } } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += fabsf(a - (NNFloat)1.0); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pUnit, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += fabsf(a - t); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += fabsf(a - t) - fabsf(a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += fabsf(a - t); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += fabsf(a - t) - fabsf(a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += fabsf(a - t); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += fabsf(a - t) - fabsf(a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> NNFloat kCalculateSparseAnalogL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogOnlyNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pUnit, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL2Error_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; error = (NNFloat)0.5 * a * a; } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += (NNFloat)0.5 * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0)); pos1 += cData._warpSize; } } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += (NNFloat)0.5 * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0) - a * a); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pUnit, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += (NNFloat)0.5 * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += (NNFloat)0.5 * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += (NNFloat)0.5 * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += (NNFloat)0.5 * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += (NNFloat)0.5 * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += (NNFloat)0.5 * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> NNFloat kCalculateSparseAnalogL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogOnlyNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pUnit, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawCrossEntropyError_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; error = -log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -log(max(MIN_ERROR, a)) + log(max(MIN_ERROR, (NNFloat)1.0 - a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseOnlyNonZeroCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pUnit, size); LAUNCHERROR("kCalculateSparseRawCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseMultinomialCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += -t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += -t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += -t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> NNFloat kCalculateSparseAnalogMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogMultinomialCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawScaledMarginalCrossEntropyError_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; if (a > cData._SMCE_zeroTarget) error = -cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a > cData._SMCE_zeroTarget) error += cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pUnit, size); LAUNCHERROR("kCalculateSparseRawScaledMarginalCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat t = (NNFloat)1.0f / (NNFloat)(end - pos1); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } NNFloat kCalculateSparseMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 256.0); if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 128.0); if (a < cData._SMCE_oneTarget) error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCE_ERROR() } template<typename T> NNFloat kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = fabsf(a - t); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = fabsf(a - t); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = fabsf(a - t); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateL1Error_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateL1Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCE_ERROR() } template<> __global__ void kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCE_ERROR() } template<> __global__ void kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateL2Error_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateL2Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateMultinomialCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if (((t == (T)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (T)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateScaledMarginalCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if ((t != (T)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCE_ERROR() } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCE_ERROR() } template<typename T> NNFloat kCalculateMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateMultinomialScaledMarginalCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } // Instantiates allowable templated functions so we can hide the implementations here // instead of in the header file because we're mixing CUDA and C++ and that's // a migraine headache in the making otherwise. void kLossTempFunction() { kCalculateL1Error<NNFloat>(0, 0, 0, NULL, NULL); kCalculateL1Error<double>(0, 0, 0, NULL, NULL); kCalculateL1Error<unsigned char>(0, 0, 0, NULL, NULL); kCalculateL1Error<char>(0, 0, 0, NULL, NULL); kCalculateL1Error<uint32_t>(0, 0, 0, NULL, NULL); kCalculateL1Error<uint64_t>(0, 0, 0, NULL, NULL); kCalculateL1Error<int32_t>(0, 0, 0, NULL, NULL); kCalculateL1Error<int64_t>(0, 0, 0, NULL, NULL); kCalculateL2Error<NNFloat>(0, 0, 0, NULL, NULL); kCalculateL2Error<double>(0, 0, 0, NULL, NULL); kCalculateL2Error<unsigned char>(0, 0, 0, NULL, NULL); kCalculateL2Error<char>(0, 0, 0, NULL, NULL); kCalculateL2Error<uint32_t>(0, 0, 0, NULL, NULL); kCalculateL2Error<uint64_t>(0, 0, 0, NULL, NULL); kCalculateL2Error<int32_t>(0, 0, 0, NULL, NULL); kCalculateL2Error<int64_t>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<double>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<char>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL); kCalculateCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<double>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<char>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL); kCalculateScaledMarginalCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<double>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<char>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL); 
kCalculateMultinomialCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<double>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<char>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL); kCalculateMultinomialScaledMarginalCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL); kCalculateSparseAnalogL1Error<NNFloat>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<double>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<unsigned char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<uint32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<uint64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<int32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL1Error<int64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<NNFloat>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<double>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<unsigned char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<uint32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<uint64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<int32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogL2Error<int64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL, false); kCalculateSparseAnalogMultinomialCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<double>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<NNFloat>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<double>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<unsigned char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<char>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<uint32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); 
kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<uint64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<int32_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<int64_t>(0, 0, 0, NULL, NULL, NULL, NULL, NULL); }
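The closing comment of this loss file explains that kLossTempFunction() exists only to force the compiler to emit the templated host wrappers for every supported data type so their definitions can stay in the .cu file rather than in a header. One conventional alternative is a block of explicit instantiation definitions, which has the same effect without a never-called function full of NULL arguments. A minimal sketch, assuming the same template declarations appear in the corresponding header (only a few of the wrapper/type pairs are shown):

// Sketch: explicit instantiation definitions as an alternative to kLossTempFunction().
// The signatures below are copied from the definitions in this file; how the header
// declares them is an assumption.
template NNFloat kCalculateL1Error<NNFloat>(uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*);
template NNFloat kCalculateL1Error<double>(uint32_t, uint32_t, uint32_t, NNFloat*, double*);
template NNFloat kCalculateL2Error<char>(uint32_t, uint32_t, uint32_t, NNFloat*, char*);
template NNFloat kCalculateSparseAnalogL1Error<int32_t>(uint32_t, uint32_t, uint32_t, NNFloat*,
                                                        uint64_t*, uint64_t*, uint32_t*, int32_t*, bool);
// ...one such line per (wrapper, type) pair mirrored from kLossTempFunction().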
15a6d1647815e257cb513eda7440b31529cc951a.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" //#define REPEAT (128*8) //#define REPEAT (1) //#define REPEAT (16) //#define REPEAT (5) #define REPEAT (1) struct graph_data h_input; // host input struct graph_data d_input; // device input struct v_struct *g_temp_v; int compare00(const void *a, const void *b) { if (((struct v_struct *)a)->src > ((struct v_struct *)b)->src) return 1; if (((struct v_struct *)a)->src < ((struct v_struct *)b)->src) return -1; return (intT)(((struct v_struct *)a)->dst) - (intT)(((struct v_struct *)b)->dst); } int compare1(const void *a, const void *b) { return ((double *)a) - ((double *)b); } void dfs(int root, int idx, int *visited, uintT *csr_ptr, uintT *csr_idx) { //fprintf(stderr, "%d %d\n", root, idx); for(int i=csr_ptr[idx]; i<csr_ptr[idx+1]; i++) { if(visited[csr_idx[i]] == 0) { visited[csr_idx[i]] = 1; dfs(root, csr_idx[i], visited, csr_ptr, csr_idx); } } } int main(int argc, char **argv) { double tot_ms[REPEAT]; double tot_ms_t=0; g_temp_v = generate_graph(argc, argv, &h_input); struct v_struct *g_temp_inv; //originally chunk_size = 1000 #ifdef CHUNK_STREAMING for(d_input.chunk_size = 10000000; (double)d_input.chunk_size / h_input.E < 9.99; d_input.chunk_size *= 10) { if(d_input.chunk_size > h_input.E) d_input.chunk_size = h_input.E; #endif for(int loop=0; loop<REPEAT; loop++) { tot_ms[loop] = run_union_find(&h_input, &d_input); //#ifdef CHUNK_STREAMING //if(loop == REPEAT-1) fprintf(stdout, "%d,%f,", d_input.chunk_size, tot_ms[loop]); //#endif #ifdef PATH_LENGTH uintT *avg_length = (uintT *)malloc(sizeof(uintT)*PATH_SIZE); uintT *max_length = (uintT *)malloc(sizeof(uintT)*PATH_SIZE); hipMemcpy(avg_length, d_input.path_length, sizeof(uintT)*PATH_SIZE, hipMemcpyDeviceToHost); hipMemcpy(max_length, d_input.path_max, sizeof(uintT)*PATH_SIZE, hipMemcpyDeviceToHost); double avg=0; uintT max=0; for(int i=0;i<PATH_SIZE;i++) { avg += avg_length[i]; max = MAX(max, max_length[i]); } fprintf(stdout, "%f,%d,", avg/d_input.E, max); //exit(0); #endif #ifndef QUERY_TEST //#define VALIDATE #endif #ifdef VALIDATE //start of validation // for static connected components #if defined(SCC) || defined(STREAMING) || defined(STREAMING_SYNC) || defined(STREAMING_SIMPLE) int nv = h_input.V; int *visit = (int *)malloc(sizeof(int)*nv); memset(visit, -1, sizeof(int)*nv); int cnt=0; for(int v = 0; v < nv; v++) { if(h_input.label[v] < 0 || h_input.label[v] >= nv) fprintf(stderr, "ERR %d: %d %d\n",v, h_input.label[v], nv); if(visit[h_input.label[v]] < 0) { visit[h_input.label[v]] = cnt++; } visit[v] = visit[h_input.label[v]]; } char fpo_name[300]; strcpy(fpo_name, argv[1]); strcat(fpo_name, ".base"); FILE *fpo = fopen(fpo_name, "r"); for(int v =0; v<nv;v++) { int t; fscanf(fpo, "%d", &t); if(t != visit[v]) { printf("FAIL\n"); exit(0);} //if(t != visit[v]) { printf("%d %d %d\n", v, t, visit[v]); } } //printf("SUCC,"); #ifdef PRINT printf("PASS\n"); #endif free(visit); fclose(fpo); #endif // for spanning trees #if defined(SP_TREE) // check the # components char fpo_name[300]; strcpy(fpo_name, argv[1]); strcat(fpo_name, ".num"); FILE *fpo = fopen(fpo_name, "r"); fscanf(fpo, "%d", &h_input.cc_cnt); fclose(fpo); uintT tmp_cc_cnt = h_input.cc_cnt; uintT sp_e = 0; struct v_struct *sp_t = (struct v_struct *)malloc(sizeof(struct v_struct)*h_input.V*2); for(uintT i=0;i<h_input.V; i++) { uintT v = h_input.hook[i]; if(h_input.algo == RAND_NAIVE || h_input.algo == RAND_SPLIT_2) { if((h_input.lparent[i] & ULONG_T_MAX)) v = -1; } if(v == -1) { h_input.cc_cnt--; } else 
{ sp_t[sp_e].src = g_temp_v[v].src; sp_t[sp_e].dst = g_temp_v[v].dst; sp_t[sp_e+1].src = g_temp_v[v].dst; sp_t[sp_e+1].dst = g_temp_v[v].src; sp_e += 2; } } if(h_input.cc_cnt != 0) { printf("FAIL1 %d\n", h_input.cc_cnt); exit(0);} qsort(sp_t, sp_e, sizeof(struct v_struct), compare00); uintT *sp_csr_ptr = (uintT *)malloc(sizeof(uintT)*(h_input.V+1)); uintT *sp_csr_idx = (uintT *)malloc(sizeof(uintT)*h_input.V*2); memset(sp_csr_ptr, 0, sizeof(uintT)*(h_input.V+1)); for(int i=0;i<sp_e;i++) { sp_csr_idx[i] = sp_t[i].dst; sp_csr_ptr[1+sp_t[i].src] = i+1; } for(int i=1;i<(h_input.V);i++) { if(sp_csr_ptr[i] == 0) sp_csr_ptr[i] = sp_csr_ptr[i-1]; } sp_csr_ptr[h_input.V] = sp_e; int *c_q, *c_vv; int qhead, qtail; c_q = (int *)malloc(sizeof(int)*h_input.V); c_vv = (int *)malloc(sizeof(int)*h_input.V); memset(c_vv, 0, sizeof(int)*h_input.V); for(uintT i=0; i<h_input.V; i++) { if(c_vv[i] == 0) { c_vv[i] = 1; tmp_cc_cnt--; qhead = 0; qtail = 1; c_q[qhead] = i; while(1) { if(qhead == qtail) break; int ii = c_q[qhead]; qhead++; for(int j=sp_csr_ptr[ii]; j<sp_csr_ptr[ii+1]; j++) { int k = sp_csr_idx[j]; if(c_vv[k] == 0) { c_vv[k] = 1; c_q[qtail] = k; qtail++; } } } } } if(tmp_cc_cnt != 0) {printf("FAIL2 %d\n",tmp_cc_cnt); exit(0);} free(sp_t); free(sp_csr_ptr); free(sp_csr_idx); free(c_q); free(c_vv); #endif #endif // end of validation } //#if defined(PATH_LENGTH) || defined(CHUNK_STREAMING) #if defined(PATH_LENGTH) //exit(0); #endif #ifdef PRINT printf("PASS\n"); #endif qsort(tot_ms, REPEAT, sizeof(double), compare1); if(REPEAT % 2 == 1) { #if defined(CHUNK_STREAMING) || defined(STREAMING) || defined(STREAMING_SIMPLE) printf("%f,",(double)d_input.E/tot_ms[REPEAT/2]/1000000); #else printf("%f,", tot_ms[REPEAT/2]); #endif } else { #if defined(CHUNK_STREAMING) || defined(STREAMING) || defined(STREAMING_SIMPLE) printf("%f,", (double)d_input.E/((tot_ms[(REPEAT-1)/2]+tot_ms[(REPEAT-1)/2+1])/2)/1000000); #else printf("%f,", (tot_ms[(REPEAT-1)/2]+tot_ms[(REPEAT-1)/2+1])/2); #endif } #if defined(CHUNK_STREAMING) } #endif #if defined(SP_TREE) free(g_temp_v); #endif exit(0); }
15a6d1647815e257cb513eda7440b31529cc951a.cu
#include "common.h" //#define REPEAT (128*8) //#define REPEAT (1) //#define REPEAT (16) //#define REPEAT (5) #define REPEAT (1) struct graph_data h_input; // host input struct graph_data d_input; // device input struct v_struct *g_temp_v; int compare00(const void *a, const void *b) { if (((struct v_struct *)a)->src > ((struct v_struct *)b)->src) return 1; if (((struct v_struct *)a)->src < ((struct v_struct *)b)->src) return -1; return (intT)(((struct v_struct *)a)->dst) - (intT)(((struct v_struct *)b)->dst); } int compare1(const void *a, const void *b) { return ((double *)a) - ((double *)b); } void dfs(int root, int idx, int *visited, uintT *csr_ptr, uintT *csr_idx) { //fprintf(stderr, "%d %d\n", root, idx); for(int i=csr_ptr[idx]; i<csr_ptr[idx+1]; i++) { if(visited[csr_idx[i]] == 0) { visited[csr_idx[i]] = 1; dfs(root, csr_idx[i], visited, csr_ptr, csr_idx); } } } int main(int argc, char **argv) { double tot_ms[REPEAT]; double tot_ms_t=0; g_temp_v = generate_graph(argc, argv, &h_input); struct v_struct *g_temp_inv; //originally chunk_size = 1000 #ifdef CHUNK_STREAMING for(d_input.chunk_size = 10000000; (double)d_input.chunk_size / h_input.E < 9.99; d_input.chunk_size *= 10) { if(d_input.chunk_size > h_input.E) d_input.chunk_size = h_input.E; #endif for(int loop=0; loop<REPEAT; loop++) { tot_ms[loop] = run_union_find(&h_input, &d_input); //#ifdef CHUNK_STREAMING //if(loop == REPEAT-1) fprintf(stdout, "%d,%f,", d_input.chunk_size, tot_ms[loop]); //#endif #ifdef PATH_LENGTH uintT *avg_length = (uintT *)malloc(sizeof(uintT)*PATH_SIZE); uintT *max_length = (uintT *)malloc(sizeof(uintT)*PATH_SIZE); cudaMemcpy(avg_length, d_input.path_length, sizeof(uintT)*PATH_SIZE, cudaMemcpyDeviceToHost); cudaMemcpy(max_length, d_input.path_max, sizeof(uintT)*PATH_SIZE, cudaMemcpyDeviceToHost); double avg=0; uintT max=0; for(int i=0;i<PATH_SIZE;i++) { avg += avg_length[i]; max = MAX(max, max_length[i]); } fprintf(stdout, "%f,%d,", avg/d_input.E, max); //exit(0); #endif #ifndef QUERY_TEST //#define VALIDATE #endif #ifdef VALIDATE //start of validation // for static connected components #if defined(SCC) || defined(STREAMING) || defined(STREAMING_SYNC) || defined(STREAMING_SIMPLE) int nv = h_input.V; int *visit = (int *)malloc(sizeof(int)*nv); memset(visit, -1, sizeof(int)*nv); int cnt=0; for(int v = 0; v < nv; v++) { if(h_input.label[v] < 0 || h_input.label[v] >= nv) fprintf(stderr, "ERR %d: %d %d\n",v, h_input.label[v], nv); if(visit[h_input.label[v]] < 0) { visit[h_input.label[v]] = cnt++; } visit[v] = visit[h_input.label[v]]; } char fpo_name[300]; strcpy(fpo_name, argv[1]); strcat(fpo_name, ".base"); FILE *fpo = fopen(fpo_name, "r"); for(int v =0; v<nv;v++) { int t; fscanf(fpo, "%d", &t); if(t != visit[v]) { printf("FAIL\n"); exit(0);} //if(t != visit[v]) { printf("%d %d %d\n", v, t, visit[v]); } } //printf("SUCC,"); #ifdef PRINT printf("PASS\n"); #endif free(visit); fclose(fpo); #endif // for spanning trees #if defined(SP_TREE) // check the # components char fpo_name[300]; strcpy(fpo_name, argv[1]); strcat(fpo_name, ".num"); FILE *fpo = fopen(fpo_name, "r"); fscanf(fpo, "%d", &h_input.cc_cnt); fclose(fpo); uintT tmp_cc_cnt = h_input.cc_cnt; uintT sp_e = 0; struct v_struct *sp_t = (struct v_struct *)malloc(sizeof(struct v_struct)*h_input.V*2); for(uintT i=0;i<h_input.V; i++) { uintT v = h_input.hook[i]; if(h_input.algo == RAND_NAIVE || h_input.algo == RAND_SPLIT_2) { if((h_input.lparent[i] & ULONG_T_MAX)) v = -1; } if(v == -1) { h_input.cc_cnt--; } else { sp_t[sp_e].src = g_temp_v[v].src; sp_t[sp_e].dst = 
g_temp_v[v].dst; sp_t[sp_e+1].src = g_temp_v[v].dst; sp_t[sp_e+1].dst = g_temp_v[v].src; sp_e += 2; } } if(h_input.cc_cnt != 0) { printf("FAIL1 %d\n", h_input.cc_cnt); exit(0);} qsort(sp_t, sp_e, sizeof(struct v_struct), compare00); uintT *sp_csr_ptr = (uintT *)malloc(sizeof(uintT)*(h_input.V+1)); uintT *sp_csr_idx = (uintT *)malloc(sizeof(uintT)*h_input.V*2); memset(sp_csr_ptr, 0, sizeof(uintT)*(h_input.V+1)); for(int i=0;i<sp_e;i++) { sp_csr_idx[i] = sp_t[i].dst; sp_csr_ptr[1+sp_t[i].src] = i+1; } for(int i=1;i<(h_input.V);i++) { if(sp_csr_ptr[i] == 0) sp_csr_ptr[i] = sp_csr_ptr[i-1]; } sp_csr_ptr[h_input.V] = sp_e; int *c_q, *c_vv; int qhead, qtail; c_q = (int *)malloc(sizeof(int)*h_input.V); c_vv = (int *)malloc(sizeof(int)*h_input.V); memset(c_vv, 0, sizeof(int)*h_input.V); for(uintT i=0; i<h_input.V; i++) { if(c_vv[i] == 0) { c_vv[i] = 1; tmp_cc_cnt--; qhead = 0; qtail = 1; c_q[qhead] = i; while(1) { if(qhead == qtail) break; int ii = c_q[qhead]; qhead++; for(int j=sp_csr_ptr[ii]; j<sp_csr_ptr[ii+1]; j++) { int k = sp_csr_idx[j]; if(c_vv[k] == 0) { c_vv[k] = 1; c_q[qtail] = k; qtail++; } } } } } if(tmp_cc_cnt != 0) {printf("FAIL2 %d\n",tmp_cc_cnt); exit(0);} free(sp_t); free(sp_csr_ptr); free(sp_csr_idx); free(c_q); free(c_vv); #endif #endif // end of validation } //#if defined(PATH_LENGTH) || defined(CHUNK_STREAMING) #if defined(PATH_LENGTH) //exit(0); #endif #ifdef PRINT printf("PASS\n"); #endif qsort(tot_ms, REPEAT, sizeof(double), compare1); if(REPEAT % 2 == 1) { #if defined(CHUNK_STREAMING) || defined(STREAMING) || defined(STREAMING_SIMPLE) printf("%f,",(double)d_input.E/tot_ms[REPEAT/2]/1000000); #else printf("%f,", tot_ms[REPEAT/2]); #endif } else { #if defined(CHUNK_STREAMING) || defined(STREAMING) || defined(STREAMING_SIMPLE) printf("%f,", (double)d_input.E/((tot_ms[(REPEAT-1)/2]+tot_ms[(REPEAT-1)/2+1])/2)/1000000); #else printf("%f,", (tot_ms[(REPEAT-1)/2]+tot_ms[(REPEAT-1)/2+1])/2); #endif } #if defined(CHUNK_STREAMING) } #endif #if defined(SP_TREE) free(g_temp_v); #endif exit(0); }
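Both copies of this union-find driver sort the REPEAT timing samples with qsort and report the median: either the raw median time or, for the streaming builds, (double)d_input.E / tot_ms[REPEAT/2] / 1e6. If run_union_find() returns milliseconds (an assumption), that figure is billions of edges processed per second; for example, 100,000,000 edges at a 50 ms median prints 2.0, i.e. roughly 2 billion edges/s. Note that compare1() subtracts the two element pointers rather than the values they point to, so the qsort call does not actually order the samples by time; a value-based comparator such as the sketch below is presumably what was intended (comparator name is hypothetical):

// Sketch: value-comparing comparator for the median-timing qsort.
// compare1() above performs pointer arithmetic on the qsort arguments, which does not
// sort the doubles by value.
int compare_time_ms(const void *a, const void *b)
{
    double x = *(const double *)a;
    double y = *(const double *)b;
    return (x > y) - (x < y);   // -1/0/1 without the truncation of (int)(x - y)
}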
ed260c9067124cd07419170ffabbc49ff6778ffe.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Final_Iterate_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; int *originIn = NULL; hipMalloc(&originIn, XSIZE*YSIZE); int *originOut = NULL; hipMalloc(&originOut, XSIZE*YSIZE); int *bestSeenIn = NULL; hipMalloc(&bestSeenIn, XSIZE*YSIZE); int *bestSeenOut = NULL; hipMalloc(&bestSeenOut, XSIZE*YSIZE); int *adjIndexes = NULL; hipMalloc(&adjIndexes, XSIZE*YSIZE); int *adjacency = NULL; hipMalloc(&adjacency, XSIZE*YSIZE); int *mis = NULL; hipMalloc(&mis, XSIZE*YSIZE); int *incomplete = NULL; hipMalloc(&incomplete, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Final_Iterate_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,originIn,originOut,bestSeenIn,bestSeenOut,adjIndexes,adjacency,mis,incomplete); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Final_Iterate_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,originIn,originOut,bestSeenIn,bestSeenOut,adjIndexes,adjacency,mis,incomplete); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Final_Iterate_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,originIn,originOut,bestSeenIn,bestSeenOut,adjIndexes,adjacency,mis,incomplete); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ed260c9067124cd07419170ffabbc49ff6778ffe.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Final_Iterate_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; int *originIn = NULL; cudaMalloc(&originIn, XSIZE*YSIZE); int *originOut = NULL; cudaMalloc(&originOut, XSIZE*YSIZE); int *bestSeenIn = NULL; cudaMalloc(&bestSeenIn, XSIZE*YSIZE); int *bestSeenOut = NULL; cudaMalloc(&bestSeenOut, XSIZE*YSIZE); int *adjIndexes = NULL; cudaMalloc(&adjIndexes, XSIZE*YSIZE); int *adjacency = NULL; cudaMalloc(&adjacency, XSIZE*YSIZE); int *mis = NULL; cudaMalloc(&mis, XSIZE*YSIZE); int *incomplete = NULL; cudaMalloc(&incomplete, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Final_Iterate_Kernel<<<gridBlock,threadBlock>>>(size,originIn,originOut,bestSeenIn,bestSeenOut,adjIndexes,adjacency,mis,incomplete); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Final_Iterate_Kernel<<<gridBlock,threadBlock>>>(size,originIn,originOut,bestSeenIn,bestSeenOut,adjIndexes,adjacency,mis,incomplete); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Final_Iterate_Kernel<<<gridBlock,threadBlock>>>(size,originIn,originOut,bestSeenIn,bestSeenOut,adjIndexes,adjacency,mis,incomplete); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
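This record pairs a kernel-timing harness with its hipify translation: cudaSetDevice/cudaMalloc/cudaDeviceSynchronize become their hip* counterparts and each triple-chevron launch becomes a hipLaunchKernelGGL call. One detail of the harness itself: the buffers are int* but cudaMalloc/hipMalloc is given XSIZE*YSIZE as the byte count, i.e. only a quarter of XSIZE*YSIZE ints are backed. A minimal CUDA sketch of the same launch pattern with a sizeof-correct allocation; dummy_kernel is a stand-in, since the real Final_Iterate_Kernel body lives in the #included Final_Iterate_Kernel.cu and is not part of this record:

#include <cuda_runtime.h>
#include <cstdio>

// Stand-in for Final_Iterate_Kernel (illustrative only).
__global__ void dummy_kernel(int size, int *data) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size) data[i] += 1;
}

int main() {
  const int XSIZE = 240, YSIZE = 240, size = XSIZE * YSIZE;
  int *d_data = NULL;
  cudaMalloc(&d_data, size * sizeof(int));        // byte count includes sizeof(int)
  dim3 block(256), grid((size + block.x - 1) / block.x);
  // hipify rewrites this launch as
  // hipLaunchKernelGGL(( dummy_kernel), dim3(grid), dim3(block), 0, 0, size, d_data);
  dummy_kernel<<<grid, block>>>(size, d_data);
  cudaDeviceSynchronize();
  cudaFree(d_data);
  return 0;
}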
c162a3e81e77d44f6623062353725132ee7d5857.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_stencil2D_4pt.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); double *src = NULL; hipMalloc(&src, XSIZE*YSIZE); int M = 2; int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_stencil2D_4pt), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,src,M,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_stencil2D_4pt), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,src,M,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_stencil2D_4pt), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,src,M,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c162a3e81e77d44f6623062353725132ee7d5857.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_stencil2D_4pt.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); double *src = NULL; cudaMalloc(&src, XSIZE*YSIZE); int M = 2; int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_stencil2D_4pt<<<gridBlock,threadBlock>>>(dst,src,M,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_stencil2D_4pt<<<gridBlock,threadBlock>>>(dst,src,M,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_stencil2D_4pt<<<gridBlock,threadBlock>>>(dst,src,M,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
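Same harness shape as the previous record, this time driving gpu_stencil2D_4pt with M = 2 and N = XSIZE*YSIZE; the stencil kernel itself is in the #included gpu_stencil2D_4pt.cu and is not shown here. For orientation only, a hedged sketch of what a generic 4-point (von Neumann) 2D stencil kernel typically looks like; the real kernel's indexing, coefficients, and boundary handling may differ:

// Illustrative 4-point stencil over an M x N row-major grid (interior points only).
__global__ void stencil2d_4pt_sketch(double *dst, const double *src, int M, int N) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row > 0 && row < M - 1 && col > 0 && col < N - 1) {
    dst[row * N + col] = 0.25 * (src[(row - 1) * N + col] + src[(row + 1) * N + col] +
                                 src[row * N + col - 1] + src[row * N + col + 1]);
  }
}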
9c0d820e2686f125d8fd247a62069deabf0a8d66.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void addBias(float* Z, float* b, int Z_x_dim, int Z_y_dim){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < Z_y_dim && col < Z_x_dim){ Z[row * Z_x_dim + col] += b[row]; } }
9c0d820e2686f125d8fd247a62069deabf0a8d66.cu
#include "includes.h" __global__ void addBias(float* Z, float* b, int Z_x_dim, int Z_y_dim){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < Z_y_dim && col < Z_x_dim){ Z[row * Z_x_dim + col] += b[row]; } }
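addBias adds the per-row bias b[row] to every element of a row-major matrix Z with Z_x_dim columns and Z_y_dim rows, one thread per element. The record ships only the kernel, so the launch geometry below is an assumption; a minimal usage sketch:

#include <cuda_runtime.h>

__global__ void addBias(float *Z, float *b, int Z_x_dim, int Z_y_dim);  // as defined above

// Cover the Z_x_dim x Z_y_dim matrix with 16x16 thread blocks; the kernel's
// bounds check handles the ragged edge.
void launch_add_bias(float *d_Z, float *d_b, int x_dim, int y_dim) {
  dim3 block(16, 16);
  dim3 grid((x_dim + block.x - 1) / block.x, (y_dim + block.y - 1) / block.y);
  addBias<<<grid, block>>>(d_Z, d_b, x_dim, y_dim);
}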
7ee4cd5499e0537fe551277dde4e04fffbcc6f53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" #include <cfloat> #include <cmath> #define BLOCK_LENGTH 16 #define BLOCK_WIDTH 12 #define NUM_SM 2 #define NUM_THREAD 192 __global__ void _k_schared_reduce_min_max(const float* const d_input, float* const d_output, const unsigned int input_size, const bool min_or_max) { const unsigned int my_absolute_position = blockDim.x * blockIdx.x + threadIdx.x; const int thread_id = threadIdx.x; extern __shared__ float shared_input_copy[]; if (my_absolute_position < input_size) shared_input_copy[thread_id] = d_input[my_absolute_position]; else { if (min_or_max) { shared_input_copy[thread_id] = -FLT_MAX; } else { shared_input_copy[thread_id] = FLT_MAX; } return; } __syncthreads(); for (unsigned int i = blockDim.x / 2; i > 0; i >>= 1) { if (thread_id < i) { if (min_or_max) { shared_input_copy[thread_id] = max(shared_input_copy[thread_id], shared_input_copy[thread_id + i]); } else { shared_input_copy[thread_id] = min(shared_input_copy[thread_id], shared_input_copy[thread_id + i]); } } } __syncthreads(); if (thread_id == 0) { d_output[blockIdx.x] = shared_input_copy[0]; } } void find_extrem_value(const float* const d_logLuminance, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols) { unsigned int input_size = numRows * numCols; unsigned int grid_size = (input_size + NUM_THREAD - 1) / NUM_THREAD, block_size = NUM_THREAD; const unsigned int schared_memory_size = sizeof(float) * block_size; float *d_max_input, *d_min_input, *d_max_output, *d_min_output; checkCudaErrors(hipMalloc(&d_min_input, input_size * sizeof(float))); checkCudaErrors(hipMalloc(&d_max_input, input_size * sizeof(float))); checkCudaErrors(hipMemcpy(d_min_input, d_logLuminance, input_size * sizeof(float), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_max_input, d_logLuminance, input_size * sizeof(float), hipMemcpyDeviceToDevice)); while(grid_size > 1) { checkCudaErrors(hipMalloc(&d_max_output, sizeof(float) * grid_size)); checkCudaErrors(hipMalloc(&d_min_output, sizeof(float) * grid_size)); hipLaunchKernelGGL(( _k_schared_reduce_min_max), dim3(grid_size), dim3(block_size), schared_memory_size, 0, d_min_input, d_min_output, input_size, 0); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( _k_schared_reduce_min_max), dim3(grid_size), dim3(block_size), schared_memory_size, 0, d_max_input, d_max_output, input_size, 1); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); input_size = grid_size; grid_size = (grid_size + NUM_THREAD - 1) / NUM_THREAD; checkCudaErrors(hipFree(d_min_input)); checkCudaErrors(hipFree(d_max_input)); d_min_input = d_min_output; d_max_input = d_max_output; } checkCudaErrors(hipMemcpy(&min_logLum, d_min_input, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&max_logLum, d_max_input, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_max_input)); checkCudaErrors(hipFree(d_min_input)); } __global__ void _k_schared_generate_histogram(const float* const d_logLuminance, const float &min_logLum, const float &lum_range, const size_t numBins, unsigned int* const d_histogram, const unsigned int input_size) { unsigned int my_absolute_position = blockDim.y * blockIdx.x + threadIdx.x; unsigned int thread_id = threadIdx.x; extern __shared__ unsigned int shared_bins[]; if (my_absolute_position < input_size) { unsigned char bin_positon = (d_logLuminance[my_absolute_position] - min_logLum) / lum_range * numBins; shared_bins[thread_id * numBins + bin_positon]++; } else { return; } __syncthreads; for (int i = 
blockDim.x / 2; i > 0; i>>=1) { if (thread_id < i) { for (int j = 0; j < numBins; ++j) { shared_bins[thread_id * numBins + j] += shared_bins[(thread_id + i) * numBins + j]; } } } __syncthreads(); if (thread_id == 0) { for (int j = 0; j < numBins; ++j) { d_histogram[blockIdx.x * numBins + j] += shared_bins[j]; } } } __global__ void _k_schared_reduce_histogram(const unsigned int* const d_histogram_input, unsigned int* const d_histogram, const unsigned int input_size, const size_t numBins) { unsigned int my_absolute_position = blockDim.y * blockIdx.x + threadIdx.x; unsigned int thread_id = threadIdx.x; extern __shared__ unsigned int d_shared_histogram[]; if (my_absolute_position < input_size) { for (int i = 0; i < numBins; ++i) { d_shared_histogram[thread_id * numBins + i] = d_histogram_input[my_absolute_position * numBins + i]; } } else { return; } __syncthreads(); for (int i = blockDim.x ; i > 0; i >>= 1) { if (thread_id < i) { for (int j = 0; j < numBins; ++j) { d_shared_histogram[thread_id * numBins + j] += d_shared_histogram[(thread_id + i) * numBins + j]; } } __syncthreads(); } if (thread_id == 0) { for (int j = 0; j < numBins; ++j) { d_histogram[blockIdx.x * numBins + j] += d_shared_histogram[j]; } } } unsigned int* compute_histogram(const float* const d_logLuminance, const float &min_logLum, const float &lum_range, const size_t numBins, const size_t numRows, const size_t numCols) { unsigned int input_size = numRows * numCols; unsigned int grid_size = (input_size + NUM_THREAD - 1) / NUM_THREAD, block_size = NUM_THREAD; const unsigned int schared_memory_size = block_size * sizeof(unsigned int) * numBins; unsigned int *d_histogram; checkCudaErrors(hipMalloc(&d_histogram, numBins * sizeof(unsigned int) * grid_size)); hipLaunchKernelGGL(( _k_schared_generate_histogram), dim3(grid_size), dim3(block_size), schared_memory_size, 0, d_logLuminance, min_logLum, lum_range, numBins, d_histogram, input_size); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); unsigned int *d_histogram_input = d_histogram; while(grid_size > 1) { checkCudaErrors(hipMalloc(&d_histogram, numBins * sizeof(float) * grid_size)); hipLaunchKernelGGL(( _k_schared_reduce_histogram), dim3(grid_size), dim3(block_size), schared_memory_size, 0, d_histogram_input, d_histogram, input_size, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(d_histogram_input)); d_histogram_input = d_histogram; input_size = grid_size; grid_size = (grid_size + NUM_THREAD - 1) / NUM_THREAD; } return d_histogram_input; } __global__ void _k_perform_scan(unsigned int* const d_histogram, const size_t numBins) { int mid = threadIdx.x + blockDim.x * blockIdx.x; if(mid >= numBins) return; for(int s = 1; s <= numBins; s *= 2) { int spot = mid - s; unsigned int val = 0; if(spot >= 0) val = d_histogram[spot]; __syncthreads(); if(spot >= 0) d_histogram[mid] += val; __syncthreads(); } } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance 
values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ // 1) find the minimum and maximum value in the input logLuminance channel // store in min_logLum and max_logLum float* h_logLuminance = new float[numRows * numCols * sizeof(float)]; checkCudaErrors(hipMemcpy(h_logLuminance, d_logLuminance, numRows * numCols * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < numCols * numRows; ++i) { std::cout<<" "<<h_logLuminance[i]; } std::cout<<std::endl; find_extrem_value(d_logLuminance, min_logLum, max_logLum,numRows, numCols); std::cout<<"The minimal value is "<<min_logLum<<std::endl; std::cout<<"The maximal value is "<<max_logLum<<std::endl; // 2) subtract them to find the range float lum_range = (max_logLum - min_logLum) / (numBins * 1.0); std::cout<<"The range is "<<lum_range<<std::endl; std::cout<<"Number of bins is "<<numBins<<std::endl; // 3) generate a histogram of all the values in the logLuminance channel using // the formula: bin = (lum[i] - lumMin) / lumRange * numBins unsigned int* d_histogram = compute_histogram(d_logLuminance, min_logLum, lum_range, numBins, numRows, numCols); // 4) Perform an exclusive scan (prefix sum) on the histogram to get // the cumulative distribution of luminance values (this should go in the // incoming d_cdf pointer which already has been allocated for you) dim3 grid_size((numBins + NUM_THREAD - 1) / NUM_THREAD), block_size(NUM_THREAD); hipLaunchKernelGGL(( _k_perform_scan), dim3(grid_size), dim3(block_size), 0, 0, d_histogram, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(d_cdf, d_histogram, sizeof(float) * numBins, hipMemcpyDeviceToDevice)); checkCudaErrors(hipFree(d_histogram)); d_histogram = NULL; }
7ee4cd5499e0537fe551277dde4e04fffbcc6f53.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" #include <cfloat> #include <cmath> #define BLOCK_LENGTH 16 #define BLOCK_WIDTH 12 #define NUM_SM 2 #define NUM_THREAD 192 __global__ void _k_schared_reduce_min_max(const float* const d_input, float* const d_output, const unsigned int input_size, const bool min_or_max) { const unsigned int my_absolute_position = blockDim.x * blockIdx.x + threadIdx.x; const int thread_id = threadIdx.x; extern __shared__ float shared_input_copy[]; if (my_absolute_position < input_size) shared_input_copy[thread_id] = d_input[my_absolute_position]; else { if (min_or_max) { shared_input_copy[thread_id] = -FLT_MAX; } else { shared_input_copy[thread_id] = FLT_MAX; } return; } __syncthreads(); for (unsigned int i = blockDim.x / 2; i > 0; i >>= 1) { if (thread_id < i) { if (min_or_max) { shared_input_copy[thread_id] = max(shared_input_copy[thread_id], shared_input_copy[thread_id + i]); } else { shared_input_copy[thread_id] = min(shared_input_copy[thread_id], shared_input_copy[thread_id + i]); } } } __syncthreads(); if (thread_id == 0) { d_output[blockIdx.x] = shared_input_copy[0]; } } void find_extrem_value(const float* const d_logLuminance, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols) { unsigned int input_size = numRows * numCols; unsigned int grid_size = (input_size + NUM_THREAD - 1) / NUM_THREAD, block_size = NUM_THREAD; const unsigned int schared_memory_size = sizeof(float) * block_size; float *d_max_input, *d_min_input, *d_max_output, *d_min_output; checkCudaErrors(cudaMalloc(&d_min_input, input_size * sizeof(float))); checkCudaErrors(cudaMalloc(&d_max_input, input_size * sizeof(float))); checkCudaErrors(cudaMemcpy(d_min_input, d_logLuminance, input_size * sizeof(float), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_max_input, d_logLuminance, input_size * sizeof(float), cudaMemcpyDeviceToDevice)); while(grid_size > 1) { checkCudaErrors(cudaMalloc(&d_max_output, sizeof(float) * grid_size)); checkCudaErrors(cudaMalloc(&d_min_output, sizeof(float) * grid_size)); _k_schared_reduce_min_max<<<grid_size, block_size, schared_memory_size>>> (d_min_input, d_min_output, input_size, 0); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); _k_schared_reduce_min_max<<<grid_size, block_size, schared_memory_size>>> (d_max_input, d_max_output, input_size, 1); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); input_size = grid_size; grid_size = (grid_size + NUM_THREAD - 1) / NUM_THREAD; checkCudaErrors(cudaFree(d_min_input)); checkCudaErrors(cudaFree(d_max_input)); d_min_input = d_min_output; d_max_input = d_max_output; } checkCudaErrors(cudaMemcpy(&min_logLum, d_min_input, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&max_logLum, d_max_input, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_max_input)); checkCudaErrors(cudaFree(d_min_input)); } __global__ void _k_schared_generate_histogram(const float* const d_logLuminance, const float &min_logLum, const float &lum_range, const size_t numBins, unsigned int* const d_histogram, const unsigned int input_size) { unsigned int my_absolute_position = blockDim.y * blockIdx.x + threadIdx.x; unsigned int thread_id = threadIdx.x; extern __shared__ unsigned int shared_bins[]; if (my_absolute_position < input_size) { unsigned char bin_positon = (d_logLuminance[my_absolute_position] - min_logLum) / lum_range * numBins; shared_bins[thread_id * numBins + bin_positon]++; } else { return; } __syncthreads; for (int i = blockDim.x / 2; i > 0; i>>=1) { if (thread_id < 
i) { for (int j = 0; j < numBins; ++j) { shared_bins[thread_id * numBins + j] += shared_bins[(thread_id + i) * numBins + j]; } } } __syncthreads(); if (thread_id == 0) { for (int j = 0; j < numBins; ++j) { d_histogram[blockIdx.x * numBins + j] += shared_bins[j]; } } } __global__ void _k_schared_reduce_histogram(const unsigned int* const d_histogram_input, unsigned int* const d_histogram, const unsigned int input_size, const size_t numBins) { unsigned int my_absolute_position = blockDim.y * blockIdx.x + threadIdx.x; unsigned int thread_id = threadIdx.x; extern __shared__ unsigned int d_shared_histogram[]; if (my_absolute_position < input_size) { for (int i = 0; i < numBins; ++i) { d_shared_histogram[thread_id * numBins + i] = d_histogram_input[my_absolute_position * numBins + i]; } } else { return; } __syncthreads(); for (int i = blockDim.x ; i > 0; i >>= 1) { if (thread_id < i) { for (int j = 0; j < numBins; ++j) { d_shared_histogram[thread_id * numBins + j] += d_shared_histogram[(thread_id + i) * numBins + j]; } } __syncthreads(); } if (thread_id == 0) { for (int j = 0; j < numBins; ++j) { d_histogram[blockIdx.x * numBins + j] += d_shared_histogram[j]; } } } unsigned int* compute_histogram(const float* const d_logLuminance, const float &min_logLum, const float &lum_range, const size_t numBins, const size_t numRows, const size_t numCols) { unsigned int input_size = numRows * numCols; unsigned int grid_size = (input_size + NUM_THREAD - 1) / NUM_THREAD, block_size = NUM_THREAD; const unsigned int schared_memory_size = block_size * sizeof(unsigned int) * numBins; unsigned int *d_histogram; checkCudaErrors(cudaMalloc(&d_histogram, numBins * sizeof(unsigned int) * grid_size)); _k_schared_generate_histogram<<<grid_size, block_size, schared_memory_size>>> (d_logLuminance, min_logLum, lum_range, numBins, d_histogram, input_size); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); unsigned int *d_histogram_input = d_histogram; while(grid_size > 1) { checkCudaErrors(cudaMalloc(&d_histogram, numBins * sizeof(float) * grid_size)); _k_schared_reduce_histogram<<<grid_size, block_size, schared_memory_size>>> (d_histogram_input, d_histogram, input_size, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(d_histogram_input)); d_histogram_input = d_histogram; input_size = grid_size; grid_size = (grid_size + NUM_THREAD - 1) / NUM_THREAD; } return d_histogram_input; } __global__ void _k_perform_scan(unsigned int* const d_histogram, const size_t numBins) { int mid = threadIdx.x + blockDim.x * blockIdx.x; if(mid >= numBins) return; for(int s = 1; s <= numBins; s *= 2) { int spot = mid - s; unsigned int val = 0; if(spot >= 0) val = d_histogram[spot]; __syncthreads(); if(spot >= 0) d_histogram[mid] += val; __syncthreads(); } } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ // 1) find 
the minimum and maximum value in the input logLuminance channel // store in min_logLum and max_logLum float* h_logLuminance = new float[numRows * numCols * sizeof(float)]; checkCudaErrors(cudaMemcpy(h_logLuminance, d_logLuminance, numRows * numCols * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < numCols * numRows; ++i) { std::cout<<" "<<h_logLuminance[i]; } std::cout<<std::endl; find_extrem_value(d_logLuminance, min_logLum, max_logLum,numRows, numCols); std::cout<<"The minimal value is "<<min_logLum<<std::endl; std::cout<<"The maximal value is "<<max_logLum<<std::endl; // 2) subtract them to find the range float lum_range = (max_logLum - min_logLum) / (numBins * 1.0); std::cout<<"The range is "<<lum_range<<std::endl; std::cout<<"Number of bins is "<<numBins<<std::endl; // 3) generate a histogram of all the values in the logLuminance channel using // the formula: bin = (lum[i] - lumMin) / lumRange * numBins unsigned int* d_histogram = compute_histogram(d_logLuminance, min_logLum, lum_range, numBins, numRows, numCols); // 4) Perform an exclusive scan (prefix sum) on the histogram to get // the cumulative distribution of luminance values (this should go in the // incoming d_cdf pointer which already has been allocated for you) dim3 grid_size((numBins + NUM_THREAD - 1) / NUM_THREAD), block_size(NUM_THREAD); _k_perform_scan<<<grid_size, block_size>>>(d_histogram, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(d_cdf, d_histogram, sizeof(float) * numBins, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaFree(d_histogram)); d_histogram = NULL; }
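The header comment in this record lays out the whole pipeline: find the luminance min/max, bin each value with bin = (lum[i] - lumMin) / lumRange * numBins, then scan the histogram into a CDF. Two things worth noting about the implementation: the generate-histogram kernel writes __syncthreads; without parentheses, so no barrier actually happens there, and _k_perform_scan is an in-place Hillis-Steele style scan that produces the inclusive running sum, whereas the assignment text asks for an exclusive scan (the header's worked example, [4 11 14], is the inclusive sum of [4 7 3]). A small CPU reference for the histogram and exclusive scan on the header's example data can serve as a checking aid; it is not the CUDA solution the assignment expects:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> lum = {2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2};
  const int numBins = 3;
  float lumMin = *std::min_element(lum.begin(), lum.end());
  float lumMax = *std::max_element(lum.begin(), lum.end());
  float lumRange = lumMax - lumMin;

  // Histogram: bin = (lum - lumMin) / lumRange * numBins, clamped to the last bin.
  std::vector<unsigned> histo(numBins, 0);
  for (float v : lum) {
    int bin = std::min(numBins - 1, (int)((v - lumMin) / lumRange * numBins));
    ++histo[bin];
  }

  // Exclusive scan: cdf[i] = sum of histo[0..i-1]; for [4 7 3] this is [0 4 11]
  // (the inclusive sum [4 11 14] is what the header's example lists).
  std::vector<unsigned> cdf(numBins, 0);
  for (int i = 1; i < numBins; ++i) cdf[i] = cdf[i - 1] + histo[i - 1];

  printf("histo: %u %u %u   cdf(exclusive): %u %u %u\n",
         histo[0], histo[1], histo[2], cdf[0], cdf[1], cdf[2]);
  return 0;
}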
ffa4b5d7e698f7ab25959f3946016fe8e5c4dcab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "6_zoon.h" __global__ void zoon(int* d_img, int* d_output, int channels, int cols, int rows) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int index = (row * cols/2 + col); row *= 2; col *= 2; d_output[row * cols + col] = d_img[index]; __syncthreads(); } __global__ void zoon_corner(int* d_img, int* d_output, int channels, int cols, int rows) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int index = row * cols / 2 + col; row *= 2; col *= 2; //d_output[(row + 1) * cols + col + 1] = (d_output[(row + 2) * cols + col] + d_output[(row * cols + col + 2)] + d_output[(row + 2) * cols + col + 2] + d_output[(row * cols + col)]) / 4; __syncthreads(); } __global__ void zoon_v_h(int* d_img, int* d_output, int channels, int cols, int rows) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int index = row * cols / 2 + col; row *= 2; col *= 2; //d_output[row * cols + col + 1] = (d_output[row * cols + col] + d_output[(row - 1) * cols + col + 1] + d_output[row * cols + col + 2] + d_output[(row + 1) * cols + col + 1]) / 4; //d_output[(row + 1) * cols + col] = (d_output[row * cols + col] + d_output[(row + 1) * cols + col - 1] + d_output[(row + 1) * cols + col + 1] + d_output[(row + 2) * cols + col]) / 4; d_output[row * cols + col + 1] = (d_output[row * cols + col] + d_output[(row - 1) * cols + col + 1] + d_output[row * cols + col + 2] + d_output[(row + 1) * cols + col + 1]) / 2; d_output[(row + 1) * cols + col] = (d_output[row * cols + col] + d_output[(row + 1) * cols + col - 1] + d_output[(row + 1) * cols + col + 1] + d_output[(row + 2) * cols + col]) / 2; __syncthreads(); } void zoon_cuda(int* img, int rows, int cols, int channels, int* output) { int* d_img; int* d_output; int length = rows * cols; //int size = sizeof(int) * length * channels; int size = sizeof(int) * length; hipMalloc((void**)&d_img, size); hipMalloc((void**)&d_output, size*4); hipMemcpy(d_img, img, size, hipMemcpyHostToDevice); dim3 block_dim(16, 16); //dim3 block_dim(1, 1); dim3 grid_dim(cols /block_dim.x, rows/block_dim.y); zoon << <block_dim, grid_dim >> > (d_img, d_output, channels, cols * 2, rows * 2); zoon_corner << <block_dim, grid_dim >> > (d_img, d_output, channels, cols * 2, rows * 2); zoon_v_h << <block_dim, grid_dim >> > (d_img, d_output, channels, cols * 2, rows * 2); //dim3 num_blocks(ceil((float)cols*2 / 16), ceil((float)rows*2 / 16)); //dim3 threads_per_block(16, 16, 1); //zoon << <num_blocks, threads_per_block >> > (d_img, d_output, channels, cols * 2, rows * 2); hipMemcpy(output, d_output, size*4, hipMemcpyDeviceToHost); hipFree(d_img); hipFree(d_output); }
ffa4b5d7e698f7ab25959f3946016fe8e5c4dcab.cu
#include "6_zoon.h" __global__ void zoon(int* d_img, int* d_output, int channels, int cols, int rows) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int index = (row * cols/2 + col); row *= 2; col *= 2; d_output[row * cols + col] = d_img[index]; __syncthreads(); } __global__ void zoon_corner(int* d_img, int* d_output, int channels, int cols, int rows) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int index = row * cols / 2 + col; row *= 2; col *= 2; //d_output[(row + 1) * cols + col + 1] = (d_output[(row + 2) * cols + col] + d_output[(row * cols + col + 2)] + d_output[(row + 2) * cols + col + 2] + d_output[(row * cols + col)]) / 4; __syncthreads(); } __global__ void zoon_v_h(int* d_img, int* d_output, int channels, int cols, int rows) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int index = row * cols / 2 + col; row *= 2; col *= 2; //d_output[row * cols + col + 1] = (d_output[row * cols + col] + d_output[(row - 1) * cols + col + 1] + d_output[row * cols + col + 2] + d_output[(row + 1) * cols + col + 1]) / 4; //d_output[(row + 1) * cols + col] = (d_output[row * cols + col] + d_output[(row + 1) * cols + col - 1] + d_output[(row + 1) * cols + col + 1] + d_output[(row + 2) * cols + col]) / 4; d_output[row * cols + col + 1] = (d_output[row * cols + col] + d_output[(row - 1) * cols + col + 1] + d_output[row * cols + col + 2] + d_output[(row + 1) * cols + col + 1]) / 2; d_output[(row + 1) * cols + col] = (d_output[row * cols + col] + d_output[(row + 1) * cols + col - 1] + d_output[(row + 1) * cols + col + 1] + d_output[(row + 2) * cols + col]) / 2; __syncthreads(); } void zoon_cuda(int* img, int rows, int cols, int channels, int* output) { int* d_img; int* d_output; int length = rows * cols; //int size = sizeof(int) * length * channels; int size = sizeof(int) * length; cudaMalloc((void**)&d_img, size); cudaMalloc((void**)&d_output, size*4); cudaMemcpy(d_img, img, size, cudaMemcpyHostToDevice); dim3 block_dim(16, 16); //dim3 block_dim(1, 1); dim3 grid_dim(cols /block_dim.x, rows/block_dim.y); zoon << <block_dim, grid_dim >> > (d_img, d_output, channels, cols * 2, rows * 2); zoon_corner << <block_dim, grid_dim >> > (d_img, d_output, channels, cols * 2, rows * 2); zoon_v_h << <block_dim, grid_dim >> > (d_img, d_output, channels, cols * 2, rows * 2); //dim3 num_blocks(ceil((float)cols*2 / 16), ceil((float)rows*2 / 16)); //dim3 threads_per_block(16, 16, 1); //zoon << <num_blocks, threads_per_block >> > (d_img, d_output, channels, cols * 2, rows * 2); cudaMemcpy(output, d_output, size*4, cudaMemcpyDeviceToHost); cudaFree(d_img); cudaFree(d_output); }
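The zoon pass copies each source pixel to the even (row, col) position of an output twice the size; zoon_corner is effectively disabled (its write is commented out) and zoon_v_h fills the in-between pixels with a four-neighbour sum divided by 2 rather than 4. Note also that the launches are written zoon<<<block_dim, grid_dim>>>, i.e. the 16x16 dim3 is used as the grid and (cols/16, rows/16) as the thread block, which only stays legal while cols/16 * rows/16 fits within thread-block limits. For comparison, a hedged sketch of a plain 2x nearest-neighbour upscale with the conventional argument order (this is not the file's interpolation scheme):

// Every output pixel copies the nearest source pixel of the half-size input.
__global__ void upscale2x_nn(const int *src, int *dst, int src_rows, int src_cols) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;   // output column
  int row = blockIdx.y * blockDim.y + threadIdx.y;   // output row
  int dst_cols = src_cols * 2, dst_rows = src_rows * 2;
  if (row < dst_rows && col < dst_cols)
    dst[row * dst_cols + col] = src[(row / 2) * src_cols + (col / 2)];
}

// Usage sketch: the grid covers the output resolution.
//   dim3 block(16, 16);
//   dim3 grid((2 * cols + block.x - 1) / block.x, (2 * rows + block.y - 1) / block.y);
//   upscale2x_nn<<<grid, block>>>(d_src, d_dst, rows, cols);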
e3db7c489dcc0acd0bda15199a1a12a6d55ce5eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #define BLOCK_SIZE 512 __global__ void reduction(float *out, float *in, unsigned size) { /******************************************************************** Load a segment of the input vector into shared memory Traverse the reduction tree Write the computed sum to the output vector at the correct index ********************************************************************/ // INSERT KERNEL CODE HERE __device__ __shared__ float partialSumVector[BLOCK_SIZE * 2]; unsigned int t = threadIdx.x; unsigned int start = blockIdx.x * blockDim.x * 2; if((start + t) < size) { partialSumVector[t] = in[start + t]; } else partialSumVector[t] = 0.0; if((start + blockDim.x + t) < size) { partialSumVector[blockDim.x + t] = in[start + blockDim.x + t]; } else partialSumVector[blockDim.x + t] = 0.0; for(unsigned int stride = blockDim.x; stride >= 1; stride = stride / 2) { __syncthreads(); if(t < stride) { partialSumVector[t] += partialSumVector[t + stride]; } __syncthreads(); } out[blockIdx.x] = partialSumVector[0]; }
e3db7c489dcc0acd0bda15199a1a12a6d55ce5eb.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #define BLOCK_SIZE 512 __global__ void reduction(float *out, float *in, unsigned size) { /******************************************************************** Load a segment of the input vector into shared memory Traverse the reduction tree Write the computed sum to the output vector at the correct index ********************************************************************/ // INSERT KERNEL CODE HERE __device__ __shared__ float partialSumVector[BLOCK_SIZE * 2]; unsigned int t = threadIdx.x; unsigned int start = blockIdx.x * blockDim.x * 2; if((start + t) < size) { partialSumVector[t] = in[start + t]; } else partialSumVector[t] = 0.0; if((start + blockDim.x + t) < size) { partialSumVector[blockDim.x + t] = in[start + blockDim.x + t]; } else partialSumVector[blockDim.x + t] = 0.0; for(unsigned int stride = blockDim.x; stride >= 1; stride = stride / 2) { __syncthreads(); if(t < stride) { partialSumVector[t] += partialSumVector[t + stride]; } __syncthreads(); } out[blockIdx.x] = partialSumVector[0]; }
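The reduction kernel sums 2*BLOCK_SIZE input elements per block into shared memory and writes one partial sum per block, so reducing a long array takes repeated launches (or a final host-side pass). Two small details: the __device__ qualifier on the in-kernel __shared__ array is redundant, and the final write out[blockIdx.x] = partialSumVector[0] is performed by every thread rather than being guarded by if (t == 0) (all threads store the same value, so the result is still correct). The assignment's host harness is not part of this record; a hedged driver sketch with illustrative names:

#include <cuda_runtime.h>

#define BLOCK_SIZE 512
__global__ void reduction(float *out, float *in, unsigned size);  // kernel as defined above

// Keep launching the block-level reduction until a single value remains,
// then copy it back to the host.
float reduce_on_device(float *d_in, unsigned size) {
  float *d_cur = d_in, *d_next = NULL;
  while (size > 1) {
    unsigned blocks = (size + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE);
    cudaMalloc(&d_next, blocks * sizeof(float));
    reduction<<<blocks, BLOCK_SIZE>>>(d_next, d_cur, size);
    cudaDeviceSynchronize();
    if (d_cur != d_in) cudaFree(d_cur);
    d_cur = d_next;
    size = blocks;
  }
  float result = 0.0f;
  cudaMemcpy(&result, d_cur, sizeof(float), cudaMemcpyDeviceToHost);
  if (d_cur != d_in) cudaFree(d_cur);
  return result;
}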
bb9ea54230ccb24f9a6a2c3ce7dd54ba010f56c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __device__ void idxToCoords(const int idx, int *row, int *col, int rows, int cols) { *row = idx / rows; *col = idx % cols; return; } __device__ void coordsToIdx(const int row, const int col, int *idx, int rows, int cols) { *idx = row * cols + col; return; } __global__ void conwayThread(char *oldState, char *newState, int rows, int cols) { int idx = threadIdx.x + blockIdx.x * blockDim.x; // extern __shared__ char dynAlloc[]; //char * row = dynAlloc; //char * above = dynAlloc + cols; //char * below = dynAlloc + 2*cols; //__shared__ char localCopy[1024]; //extern __shared__ char localCopy[]; //if( threadIdx.x == 0) //{ //for(int i = 0; i < rows * cols; i++) //{ //localCopy[i] = oldState[i]; //} //} // localCopy[threadIdx.x] = oldState[idx]; // __syncthreads(); if (idx >= rows * cols) return; // hipMemcpy(localCopy, oldState, rows * cols * sizeof(char), hipMemcpyDeviceToDevice ); //int idx = threadIdx.x + blockIdx.x * blockDim.x; //printf("This is thread %d\n", idx); //if (idx >= rows * cols) // return; int colIdx; int rowIdx; int newIdx; idxToCoords(idx, &rowIdx, &colIdx, rows, cols); coordsToIdx(rowIdx, colIdx, &newIdx, rows, cols); //printf("Block: %d, Blockdim: %d, Thread: %d, Overall %d: row %d, col %d, newIdx %d\n", blockIdx.x, blockDim.x, threadIdx.x, idx, rowIdx, colIdx, newIdx); int numLiveNeighbors = 0; int tempRow; int tempCol; int tempIdx; char tempNew; char tempVal; //printf("Thread: %d continuing\n", idx); // check left neighbor tempRow = rowIdx; tempCol = colIdx - 1; if (tempCol < 0) tempCol = cols - 1; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx - 1; if (tempRow < 0) tempRow = rows - 1; tempCol = colIdx - 1; if (tempCol < 0) tempCol = cols - 1; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) // numLiveNeighbors++; tempRow = rowIdx - 1; if (tempRow < 0) tempRow = rows - 1; tempCol = colIdx; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) // numLiveNeighbors++; tempRow = rowIdx - 1; if (tempRow < 0) tempRow = rows - 1; tempCol = colIdx + 1; if (tempCol >= cols) tempCol = 0; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx; tempCol = colIdx + 1; if (tempCol >= cols) tempCol = 0; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx + 1; if (tempRow >= rows) tempRow = 0; tempCol = colIdx + 1; if (tempCol >= cols) tempCol = 0; 
coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx + 1; if (tempRow >= rows) tempRow = 0; tempCol = colIdx; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx + 1; if (tempRow >= rows) tempRow = 0; tempCol = colIdx - 1; if (tempCol < 0) tempCol = cols - 1; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; //printf("Idx: %d has %d neighbors\n", idx, numLiveNeighbors); //if (localCopy[threadIdx.x] == 1) //__syncthreads(); //localCopy[threadIdx.x] = oldState[idx]; //__syncthreads(); //tempVal = oldState[idx]; //__syncthreads(); if(oldState[idx] == 1) { if (numLiveNeighbors < 2 || numLiveNeighbors > 3) { tempNew = 0; //localCopy[threadIdx.x] = 0; } else { tempNew = 1; //localCopy[threadIdx.x] = 1; } } else { if (numLiveNeighbors == 3) { tempNew = 1; //localCopy[threadIdx.x] = 1; } else { tempNew = 0; //localCopy[threadIdx.x] = 0; } } __syncthreads(); newState[idx] = tempNew; //newState[idx] = localCopy[threadIdx.x]; return; //printf("Cell %d has %d live neighbors\n", idx, numLiveNeighbors); } void printBoard(char *board, int rows, int cols) { int counter = 0; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (board[counter] == 0) printf("-"); else printf("0"); counter++; } printf("\n"); } return; } int main() { //const int arraySize = 5; //const int a[arraySize] = { 1, 2, 3, 4, 5 }; //const int b[arraySize] = { 10, 20, 30, 40, 50 }; //int c[arraySize] = { 0 }; const int iterations = 100; const int rows = 256; const int cols = 256; const int boardSize = rows * cols; char prevState[boardSize]; char nextState[boardSize]; char *gpu_prevState = 0; char *gpu_nextState = 0; for (int i = 0; i < boardSize; i++) prevState[i] = rand() % 2; printf("Beginning state:\n"); printBoard(prevState, rows, cols); hipError_t errors; errors = hipSetDevice(0); hipDeviceProp_t props; errors = hipGetDeviceProperties(&props, 0); int nBlocks; printf("Max threads: %d\n", props.maxThreadsPerBlock); int temp = (boardSize + (props.maxThreadsPerBlock - (boardSize % props.maxThreadsPerBlock))); printf("Temp: %d\n", temp); if ((boardSize % props.maxThreadsPerBlock) != 0) nBlocks = (boardSize + (props.maxThreadsPerBlock - (boardSize % props.maxThreadsPerBlock))) / props.maxThreadsPerBlock; else nBlocks = boardSize / props.maxThreadsPerBlock; printf("Blocks: %d\n", nBlocks); if (errors != hipSuccess) { printf("Error setting device\n"); exit(0); } errors = hipMalloc((void **)&gpu_prevState, boardSize * sizeof(char)); if (errors != hipSuccess) { printf("Error allocating previous state\n"); exit(0); } errors = hipMalloc((void **)&gpu_nextState, boardSize * sizeof(char)); if (errors != hipSuccess) { printf("Error allocating next state\n"); exit(0); } errors = hipMemcpy(gpu_prevState, prevState, boardSize * sizeof(char), hipMemcpyHostToDevice); if (errors != hipSuccess) { printf("Error copying previous state\n"); exit(0); } errors = 
hipMemcpy(gpu_nextState, nextState, boardSize * sizeof(char), hipMemcpyHostToDevice); if (errors != hipSuccess) { printf("Error copying next state\n"); exit(0); } for (int i = 0; i < iterations; i++) { printf("On iteration %d\n", i); hipLaunchKernelGGL(( conwayThread) , dim3(nBlocks * 4), dim3(props.maxThreadsPerBlock / 4), 0, 0, gpu_prevState, gpu_nextState, rows, cols); errors = hipGetLastError(); if (errors != hipSuccess) { printf("Error launching kernel\n"); printf("%s\n", hipGetErrorString(errors)); exit(0); } errors = hipDeviceSynchronize(); if (errors != hipSuccess) { printf("Error synchronizing device\n"); printf("%s\n", hipGetErrorString(errors)); exit(0); } // Copy through the host //hipMemcpy(nextState, gpu_nextState, boardSize * sizeof(char), hipMemcpyDeviceToHost); //hipMemcpy(gpu_prevState, nextState, boardSize * sizeof(char), hipMemcpyHostToDevice); // Copy directly hipMemcpy(gpu_prevState, gpu_nextState, boardSize * sizeof(char), hipMemcpyDeviceToDevice); } hipMemcpy(nextState, gpu_nextState, boardSize * sizeof(char), hipMemcpyDeviceToHost); printf("Final state\n"); printBoard(nextState, rows, cols); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. errors = hipDeviceReset(); if (errors != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; }
bb9ea54230ccb24f9a6a2c3ce7dd54ba010f56c0.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __device__ void idxToCoords(const int idx, int *row, int *col, int rows, int cols) { *row = idx / rows; *col = idx % cols; return; } __device__ void coordsToIdx(const int row, const int col, int *idx, int rows, int cols) { *idx = row * cols + col; return; } __global__ void conwayThread(char *oldState, char *newState, int rows, int cols) { int idx = threadIdx.x + blockIdx.x * blockDim.x; // extern __shared__ char dynAlloc[]; //char * row = dynAlloc; //char * above = dynAlloc + cols; //char * below = dynAlloc + 2*cols; //__shared__ char localCopy[1024]; //extern __shared__ char localCopy[]; //if( threadIdx.x == 0) //{ //for(int i = 0; i < rows * cols; i++) //{ //localCopy[i] = oldState[i]; //} //} // localCopy[threadIdx.x] = oldState[idx]; // __syncthreads(); if (idx >= rows * cols) return; // cudaMemcpy(localCopy, oldState, rows * cols * sizeof(char), cudaMemcpyDeviceToDevice ); //int idx = threadIdx.x + blockIdx.x * blockDim.x; //printf("This is thread %d\n", idx); //if (idx >= rows * cols) // return; int colIdx; int rowIdx; int newIdx; idxToCoords(idx, &rowIdx, &colIdx, rows, cols); coordsToIdx(rowIdx, colIdx, &newIdx, rows, cols); //printf("Block: %d, Blockdim: %d, Thread: %d, Overall %d: row %d, col %d, newIdx %d\n", blockIdx.x, blockDim.x, threadIdx.x, idx, rowIdx, colIdx, newIdx); int numLiveNeighbors = 0; int tempRow; int tempCol; int tempIdx; char tempNew; char tempVal; //printf("Thread: %d continuing\n", idx); // check left neighbor tempRow = rowIdx; tempCol = colIdx - 1; if (tempCol < 0) tempCol = cols - 1; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx - 1; if (tempRow < 0) tempRow = rows - 1; tempCol = colIdx - 1; if (tempCol < 0) tempCol = cols - 1; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) // numLiveNeighbors++; tempRow = rowIdx - 1; if (tempRow < 0) tempRow = rows - 1; tempCol = colIdx; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) // numLiveNeighbors++; tempRow = rowIdx - 1; if (tempRow < 0) tempRow = rows - 1; tempCol = colIdx + 1; if (tempCol >= cols) tempCol = 0; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx; tempCol = colIdx + 1; if (tempCol >= cols) tempCol = 0; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx + 1; if (tempRow >= rows) tempRow = 0; tempCol = colIdx + 1; if (tempCol >= cols) tempCol = 0; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx 
== 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx + 1; if (tempRow >= rows) tempRow = 0; tempCol = colIdx; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; tempRow = rowIdx + 1; if (tempRow >= rows) tempRow = 0; tempCol = colIdx - 1; if (tempCol < 0) tempCol = cols - 1; coordsToIdx(tempRow, tempCol, &tempIdx, rows, cols); //if(idx == 0) //printf("Checking %d - %d, %d\n", tempIdx, tempRow, tempCol); if (oldState[tempIdx] == 1) numLiveNeighbors++; //tempVal = oldState[tempIdx]; //__syncthreads(); //if(tempVal == 1) //numLiveNeighbors++; //printf("Idx: %d has %d neighbors\n", idx, numLiveNeighbors); //if (localCopy[threadIdx.x] == 1) //__syncthreads(); //localCopy[threadIdx.x] = oldState[idx]; //__syncthreads(); //tempVal = oldState[idx]; //__syncthreads(); if(oldState[idx] == 1) { if (numLiveNeighbors < 2 || numLiveNeighbors > 3) { tempNew = 0; //localCopy[threadIdx.x] = 0; } else { tempNew = 1; //localCopy[threadIdx.x] = 1; } } else { if (numLiveNeighbors == 3) { tempNew = 1; //localCopy[threadIdx.x] = 1; } else { tempNew = 0; //localCopy[threadIdx.x] = 0; } } __syncthreads(); newState[idx] = tempNew; //newState[idx] = localCopy[threadIdx.x]; return; //printf("Cell %d has %d live neighbors\n", idx, numLiveNeighbors); } void printBoard(char *board, int rows, int cols) { int counter = 0; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (board[counter] == 0) printf("-"); else printf("0"); counter++; } printf("\n"); } return; } int main() { //const int arraySize = 5; //const int a[arraySize] = { 1, 2, 3, 4, 5 }; //const int b[arraySize] = { 10, 20, 30, 40, 50 }; //int c[arraySize] = { 0 }; const int iterations = 100; const int rows = 256; const int cols = 256; const int boardSize = rows * cols; char prevState[boardSize]; char nextState[boardSize]; char *gpu_prevState = 0; char *gpu_nextState = 0; for (int i = 0; i < boardSize; i++) prevState[i] = rand() % 2; printf("Beginning state:\n"); printBoard(prevState, rows, cols); cudaError_t errors; errors = cudaSetDevice(0); cudaDeviceProp props; errors = cudaGetDeviceProperties(&props, 0); int nBlocks; printf("Max threads: %d\n", props.maxThreadsPerBlock); int temp = (boardSize + (props.maxThreadsPerBlock - (boardSize % props.maxThreadsPerBlock))); printf("Temp: %d\n", temp); if ((boardSize % props.maxThreadsPerBlock) != 0) nBlocks = (boardSize + (props.maxThreadsPerBlock - (boardSize % props.maxThreadsPerBlock))) / props.maxThreadsPerBlock; else nBlocks = boardSize / props.maxThreadsPerBlock; printf("Blocks: %d\n", nBlocks); if (errors != cudaSuccess) { printf("Error setting device\n"); exit(0); } errors = cudaMalloc((void **)&gpu_prevState, boardSize * sizeof(char)); if (errors != cudaSuccess) { printf("Error allocating previous state\n"); exit(0); } errors = cudaMalloc((void **)&gpu_nextState, boardSize * sizeof(char)); if (errors != cudaSuccess) { printf("Error allocating next state\n"); exit(0); } errors = cudaMemcpy(gpu_prevState, prevState, boardSize * sizeof(char), cudaMemcpyHostToDevice); if (errors != cudaSuccess) { printf("Error copying previous state\n"); exit(0); } errors = cudaMemcpy(gpu_nextState, nextState, boardSize * 
sizeof(char), cudaMemcpyHostToDevice); if (errors != cudaSuccess) { printf("Error copying next state\n"); exit(0); } for (int i = 0; i < iterations; i++) { printf("On iteration %d\n", i); conwayThread <<<nBlocks * 4, props.maxThreadsPerBlock / 4>>>(gpu_prevState, gpu_nextState, rows, cols); errors = cudaGetLastError(); if (errors != cudaSuccess) { printf("Error launching kernel\n"); printf("%s\n", cudaGetErrorString(errors)); exit(0); } errors = cudaDeviceSynchronize(); if (errors != cudaSuccess) { printf("Error synchronizing device\n"); printf("%s\n", cudaGetErrorString(errors)); exit(0); } // Copy through the host //cudaMemcpy(nextState, gpu_nextState, boardSize * sizeof(char), cudaMemcpyDeviceToHost); //cudaMemcpy(gpu_prevState, nextState, boardSize * sizeof(char), cudaMemcpyHostToDevice); // Copy directly cudaMemcpy(gpu_prevState, gpu_nextState, boardSize * sizeof(char), cudaMemcpyDeviceToDevice); } cudaMemcpy(nextState, gpu_nextState, boardSize * sizeof(char), cudaMemcpyDeviceToHost); printf("Final state\n"); printBoard(nextState, rows, cols); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. errors = cudaDeviceReset(); if (errors != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; }
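conwayThread counts the eight toroidally wrapped neighbours of each cell and writes the next state into a second buffer; the host then copies next back onto prev with a device-to-device memcpy and repeats for 100 iterations, launching nBlocks*4 blocks of maxThreadsPerBlock/4 threads. One quirk: idxToCoords computes *row = idx / rows, which matches coordsToIdx (row * cols + col) only because the board is square (256 x 256). A small sketch of the mapping that also holds for non-square boards, plus the wrap rule factored into a helper (names are illustrative):

// Inverse of idx = row * cols + col for a row-major rows x cols grid:
// divide by the number of columns, not rows.
__device__ void idx_to_coords_general(int idx, int *row, int *col, int cols) {
  *row = idx / cols;
  *col = idx % cols;
}

// Toroidal wrap used by the neighbour checks, valid for offsets of -1, 0, or +1.
__device__ int wrap(int v, int n) { return (v % n + n) % n; }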
00218c3760b4da51ccf8c79104304fd69c5ec942.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" // Contains GPU Cuda code that executes BFS algorithm // STL // Internal Headers // taken from global_memory.cu, Creates event and records time __global__ void BFSLevels(int *vertices, int *edges, int *distances, int *predecessors, int *vertIndices, int *edgeSize, bool *levels, bool *visitedVertices, bool *foundDest, int numVert, int destination) { // Grab ThreadID int thrID = threadIdx.x + blockIdx.x * blockDim.x; __shared__ bool destFound; destFound = false; if (thrID < numVert && !destFound) { int curVert = vertices[thrID]; // Iterate through level if true if (levels[curVert]) { levels[curVert] = false; visitedVertices[curVert] = true; // Grab indexes for curVert edges in edge array int edgesEnd = edgeSize[thrID] + vertIndices[thrID]; // Iterate through all edges for current vertex for (int edgeIter = vertIndices[thrID]; edgeIter < edgesEnd; ++edgeIter) { // Grab next Vertex at end of edge int nextVert = edges[edgeIter]; // If it hasn't been visited store info // for distance and predecessors and set level // to true for next level of vertices if (!visitedVertices[nextVert]) { distances[nextVert] = distances[curVert] + 1; levels[nextVert] = true; predecessors[nextVert] = curVert; // Set found destination to true and sync threads if (nextVert == destination) { *foundDest = true; destFound = true; __syncthreads(); } } } } } }
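Two details of BFSLevels are worth flagging: every thread stores destFound = false into shared memory with no barrier before the flag is read, and the __syncthreads() sits inside a branch that only the thread reaching the destination executes, which is undefined behaviour once other threads in the block have diverged. The sketch below is not the BFS kernel itself, only a small self-contained illustration of the usual shared early-exit-flag handling (all names are hypothetical):

// Hypothetical kernel showing the conventional handling of a per-block shared flag.
__global__ void sharedFlagPattern(const int *data, int n, int target, bool *found)
{
    __shared__ bool blockFound;
    if (threadIdx.x == 0) blockFound = false;   // single writer initialises the flag
    __syncthreads();                            // barrier reached by every thread in the block

    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n && !blockFound && data[i] == target) {
        blockFound = true;   // benign race: every writer stores the same value
        *found = true;       // note: no __syncthreads() inside this divergent branch
    }
}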
00218c3760b4da51ccf8c79104304fd69c5ec942.cu
#include "includes.h" // Contains GPU Cuda code that executes BFS algorithm // STL // Internal Headers // taken from global_memory.cu, Creates event and records time __global__ void BFSLevels(int *vertices, int *edges, int *distances, int *predecessors, int *vertIndices, int *edgeSize, bool *levels, bool *visitedVertices, bool *foundDest, int numVert, int destination) { // Grab ThreadID int thrID = threadIdx.x + blockIdx.x * blockDim.x; __shared__ bool destFound; destFound = false; if (thrID < numVert && !destFound) { int curVert = vertices[thrID]; // Iterate through level if true if (levels[curVert]) { levels[curVert] = false; visitedVertices[curVert] = true; // Grab indexes for curVert edges in edge array int edgesEnd = edgeSize[thrID] + vertIndices[thrID]; // Iterate through all edges for current vertex for (int edgeIter = vertIndices[thrID]; edgeIter < edgesEnd; ++edgeIter) { // Grab next Vertex at end of edge int nextVert = edges[edgeIter]; // If it hasn't been visited store info // for distance and predecessors and set level // to true for next level of vertices if (!visitedVertices[nextVert]) { distances[nextVert] = distances[curVert] + 1; levels[nextVert] = true; predecessors[nextVert] = curVert; // Set found destination to true and sync threads if (nextVert == destination) { *foundDest = true; destFound = true; __syncthreads(); } } } } } }
aa97c347b753b79405b13768fc73212fc99a8395.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "Im2Col.h" #include "hl_device_functions.cuh" namespace paddle { template <class T> __global__ void im2col(const T* data_im, int numOuts, int height, int width, int blockH, int blockW, int strideH, int strideW, int paddingH, int paddingW, int dilationH, int dilationW, int height_col, int width_col, T* data_col) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < numOuts) { int w_out = index % width_col; index /= width_col; int h_out = index % height_col; int channel_in = index / height_col; int channel_out = channel_in * blockH * blockW; int h_in = h_out * strideH; int w_in = w_out * strideW; data_col += (channel_out * height_col + h_out) * width_col + w_out; for (int i = 0; i < blockH; ++i) { for (int j = 0; j < blockW; ++j) { int rIdx = int(h_in + i * dilationH); int cIdx = int(w_in + j * dilationW); if ((rIdx - (int)paddingH) >= (int)height || (rIdx - (int)paddingH) < 0 || (cIdx - (int)paddingW) >= (int)width || (cIdx - (int)paddingW) < 0) { *data_col = 0; } else { rIdx = rIdx + channel_in * height - paddingH; cIdx = cIdx - paddingW; *data_col = data_im[rIdx * width + cIdx]; } data_col += height_col * width_col; } } } } /* * imShape = [inputChannels, inputHeight, inputWidth] * colShape = * [inputChannels, filterHeight, filterWidth, outputHeight, outputWidth] */ template <class T> class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, T> { public: void operator()(const T* imData, const TensorShape& imShape, T* colData, const TensorShape& colShape, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth) { int inputChannels = imShape[0]; int inputHeight = imShape[1]; int inputWidth = imShape[2]; int filterHeight = colShape[1]; int filterWidth = colShape[2]; int outputHeight = colShape[3]; int outputWidth = colShape[4]; int numKernels = inputChannels * outputHeight * outputWidth; int blocks = (numKernels + 1024 - 1) / 1024; int blockX = 512; int blockY = (blocks + 512 - 1) / 512; dim3 threads(1024, 1); dim3 grid(blockX, blockY); hipLaunchKernelGGL(( im2col<T>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, imData, numKernels, inputHeight, inputWidth, filterHeight, filterWidth, strideHeight, strideWidth, paddingHeight, paddingWidth, dilationHeight, dilationWidth, outputHeight, outputWidth, colData); CHECK_SYNC("Im2ColFunctor GPU failed"); } }; template <class T> __global__ void col2im(size_t n, const T* data_col, size_t height, size_t width, size_t channels, size_t blockH, size_t blockW, size_t strideH, size_t strideW, size_t paddingH, size_t paddingW, size_t dilationH, size_t dilationW, size_t height_col, size_t width_col, T* data_im) { size_t index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < n) { T val = 0; int w = int(index % width); int h = int((index / width) % height); int c = int(index / (width * 
height)); int filterH = (blockH - 1) * dilationH + 1; int filterW = (blockW - 1) * dilationW + 1; if ((w - (int)paddingW) >= 0 && (w - (int)paddingW) < (width - 2 * paddingW) && (h - (int)paddingH) >= 0 && (h - paddingH) < (height - 2 * paddingH)) { // compute the start and end of the output int w_col_start = (w < (int)filterW) ? 0 : (w - int(filterW)) / (int)strideW + 1; int w_col_end = min((int)(w / (int)strideW + 1), (int)(width_col)); int h_col_start = (h < (int)filterH) ? 0 : (h - (int)filterH) / (int)strideH + 1; int h_col_end = min(int(h / strideH + 1), int(height_col)); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { // the col location: [c * width * height + h_out, w_out] int h_k = (h - h_col * strideH); int w_k = (w - w_col * strideW); if (h_k % dilationH == 0 && w_k % dilationW == 0) { h_k /= dilationH; w_k /= dilationW; int c_col = (((c * blockH + h_k) * blockW + w_k) * height_col + h_col) * width_col + w_col; val += data_col[c_col]; } } } h -= paddingH; w -= paddingW; data_im[c * ((width - 2 * paddingW) * (height - 2 * paddingH)) + h * (width - 2 * paddingW) + w] += val; } } } /* * imShape = [inputChannels, inputHeight, inputWidth] * colShape = * [inputChannels, filterHeight, filterWidth, outputHeight, outputWidth] */ template <class T> class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, T> { public: void operator()(T* imData, const TensorShape& imShape, const T* colData, const TensorShape& colShape, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth) { int inputChannels = imShape[0]; int inputHeight = imShape[1]; int inputWidth = imShape[2]; int filterHeight = colShape[1]; int filterWidth = colShape[2]; int outputHeight = colShape[3]; int outputWidth = colShape[4]; size_t numKernels = inputChannels * (inputHeight + 2 * paddingHeight) * (inputWidth + 2 * paddingWidth); size_t blocks = (numKernels + 1024 - 1) / 1024; size_t blockX = 512; size_t blockY = (blocks + 512 - 1) / 512; dim3 threads(1024, 1); dim3 grid(blockX, blockY); // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
hipLaunchKernelGGL(( col2im<T>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, numKernels, colData, inputHeight + 2 * paddingHeight, inputWidth + 2 * paddingWidth, inputChannels, filterHeight, filterWidth, strideHeight, strideWidth, paddingHeight, paddingWidth, dilationHeight, dilationWidth, outputHeight, outputWidth, imData); CHECK_SYNC("Col2ImFunctor GPU failed"); } }; template class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, float>; template class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, double>; template class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, float>; template class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, double>; template <class T> __global__ void im2colOCF(const T* imData, T* colData, int inputChannels, int inputHeight, int inputWidth, int filterHeight, int filterWidth, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth, int outputHeight, int outputWidth) { int swId = blockIdx.x; int shId = blockIdx.y; for (int channelId = threadIdx.z; channelId < inputChannels; channelId += blockDim.z) { for (int idy = threadIdx.y; idy < filterHeight; idy += blockDim.y) { for (int idx = threadIdx.x; idx < filterWidth; idx += blockDim.x) { int widthOffset = idx * dilationHeight + swId * strideWidth - paddingWidth; int heightOffset = idy * dilationWidth + shId * strideHeight - paddingHeight; int imOffset = widthOffset + heightOffset * inputWidth + channelId * inputHeight * inputWidth; int colOffset = idx + idy * filterWidth + channelId * filterHeight * filterWidth + (shId * outputWidth + swId) * (inputChannels * filterHeight * filterWidth); if (heightOffset >= inputHeight || heightOffset < 0 || widthOffset >= inputWidth || widthOffset < 0) { colData[colOffset] = T(0); } else { colData[colOffset] = imData[imOffset]; } } } } } /* * imShape = [inputChannels, inputHeight, inputWidth] * colShape = * [outputHeight, outputWidth, inputChannels, filterHeight, filterWidth] */ template <class T> class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, T> { public: void operator()(const T* imData, const TensorShape& imShape, T* colData, const TensorShape& colShape, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth) { int inputChannels = imShape[0]; int inputHeight = imShape[1]; int inputWidth = imShape[2]; int filterHeight = colShape[3]; int filterWidth = colShape[4]; int outputHeight = colShape[0]; int outputWidth = colShape[1]; int blockDimX = 0; int blockDimY = 0; if (filterHeight <= 4 && filterWidth <= 4) { blockDimX = 4; blockDimY = 4; } else if (filterHeight <= 8 && filterWidth <= 8) { blockDimX = 8; blockDimY = 8; } else if (filterHeight <= 16 && filterWidth <= 16) { blockDimX = 16; blockDimY = 16; } else { blockDimX = 32; blockDimY = 32; } int blockDimZ = 1024 / blockDimX / blockDimY; dim3 threads(blockDimX, blockDimY, ::min(blockDimZ, inputChannels)); dim3 grid(outputWidth, outputHeight); hipLaunchKernelGGL(( im2colOCF<T>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, imData, colData, inputChannels, inputHeight, inputWidth, filterHeight, filterWidth, strideHeight, strideWidth, paddingHeight, paddingWidth, dilationHeight, dilationWidth, outputHeight, outputWidth); CHECK_SYNC("Im2ColFunctor GPU failed"); } }; template <class T> __global__ void col2imOCF(T* imData, const T* colData, int inputChannels, int inputHeight, int inputWidth, int filterHeight, int filterWidth, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth, int outputHeight, int outputWidth) { 
int swId = blockIdx.x; int shId = blockIdx.y; for (int channelId = threadIdx.z; channelId < inputChannels; channelId += blockDim.z) { for (int idy = threadIdx.y; idy < filterHeight; idy += blockDim.y) { for (int idx = threadIdx.x; idx < filterWidth; idx += blockDim.x) { int widthOffset = idx * dilationWidth + swId * strideWidth - paddingWidth; int heightOffset = idy * dilationHeight + shId * strideHeight - paddingHeight; int imOffset = widthOffset + heightOffset * inputWidth + channelId * inputHeight * inputWidth; int colOffset = idx + idy * filterWidth + channelId * filterHeight * filterWidth + (shId * outputWidth + swId) * (inputChannels * filterHeight * filterWidth); if (heightOffset >= 0 && heightOffset < inputHeight && widthOffset >= 0 && widthOffset < inputWidth) { paddle::paddleAtomicAdd(imData + imOffset, colData[colOffset]); } } } } } /* * imShape = [inputChannels, inputHeight, inputWidth] * colShape = * [outputHeight, outputWidth, inputChannels, filterHeight, filterWidth] */ template <class T> class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, T> { public: void operator()(T* imData, const TensorShape& imShape, const T* colData, const TensorShape& colShape, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth) { int inputChannels = imShape[0]; int inputHeight = imShape[1]; int inputWidth = imShape[2]; int filterHeight = colShape[3]; int filterWidth = colShape[4]; int outputHeight = colShape[0]; int outputWidth = colShape[1]; int blockDimX = 0; int blockDimY = 0; if (filterHeight <= 4 && filterWidth <= 4) { blockDimX = 4; blockDimY = 4; } else if (filterHeight <= 8 && filterWidth <= 8) { blockDimX = 8; blockDimY = 8; } else if (filterHeight <= 16 && filterWidth <= 16) { blockDimX = 16; blockDimY = 16; } else { blockDimX = 32; blockDimY = 32; } int blockDimZ = 1024 / blockDimX / blockDimY; dim3 threads(blockDimX, blockDimY, ::min(blockDimZ, inputChannels)); dim3 grid(outputWidth, outputHeight); hipLaunchKernelGGL(( col2imOCF<T>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, imData, colData, inputChannels, inputHeight, inputWidth, filterHeight, filterWidth, strideHeight, strideWidth, paddingHeight, paddingWidth, dilationHeight, dilationWidth, outputHeight, outputWidth); CHECK_SYNC("Col2ImFunctor GPU failed"); } }; template class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, float>; template class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, double>; template class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, float>; template class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, double>; } // namespace paddle
aa97c347b753b79405b13768fc73212fc99a8395.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "Im2Col.h" #include "hl_device_functions.cuh" namespace paddle { template <class T> __global__ void im2col(const T* data_im, int numOuts, int height, int width, int blockH, int blockW, int strideH, int strideW, int paddingH, int paddingW, int dilationH, int dilationW, int height_col, int width_col, T* data_col) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < numOuts) { int w_out = index % width_col; index /= width_col; int h_out = index % height_col; int channel_in = index / height_col; int channel_out = channel_in * blockH * blockW; int h_in = h_out * strideH; int w_in = w_out * strideW; data_col += (channel_out * height_col + h_out) * width_col + w_out; for (int i = 0; i < blockH; ++i) { for (int j = 0; j < blockW; ++j) { int rIdx = int(h_in + i * dilationH); int cIdx = int(w_in + j * dilationW); if ((rIdx - (int)paddingH) >= (int)height || (rIdx - (int)paddingH) < 0 || (cIdx - (int)paddingW) >= (int)width || (cIdx - (int)paddingW) < 0) { *data_col = 0; } else { rIdx = rIdx + channel_in * height - paddingH; cIdx = cIdx - paddingW; *data_col = data_im[rIdx * width + cIdx]; } data_col += height_col * width_col; } } } } /* * imShape = [inputChannels, inputHeight, inputWidth] * colShape = * [inputChannels, filterHeight, filterWidth, outputHeight, outputWidth] */ template <class T> class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, T> { public: void operator()(const T* imData, const TensorShape& imShape, T* colData, const TensorShape& colShape, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth) { int inputChannels = imShape[0]; int inputHeight = imShape[1]; int inputWidth = imShape[2]; int filterHeight = colShape[1]; int filterWidth = colShape[2]; int outputHeight = colShape[3]; int outputWidth = colShape[4]; int numKernels = inputChannels * outputHeight * outputWidth; int blocks = (numKernels + 1024 - 1) / 1024; int blockX = 512; int blockY = (blocks + 512 - 1) / 512; dim3 threads(1024, 1); dim3 grid(blockX, blockY); im2col<T><<<grid, threads, 0, STREAM_DEFAULT>>>(imData, numKernels, inputHeight, inputWidth, filterHeight, filterWidth, strideHeight, strideWidth, paddingHeight, paddingWidth, dilationHeight, dilationWidth, outputHeight, outputWidth, colData); CHECK_SYNC("Im2ColFunctor GPU failed"); } }; template <class T> __global__ void col2im(size_t n, const T* data_col, size_t height, size_t width, size_t channels, size_t blockH, size_t blockW, size_t strideH, size_t strideW, size_t paddingH, size_t paddingW, size_t dilationH, size_t dilationW, size_t height_col, size_t width_col, T* data_im) { size_t index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < n) { T val = 0; int w = int(index % width); int h = int((index / width) % height); int c = int(index / (width * height)); int filterH = (blockH - 1) * dilationH + 1; int filterW = (blockW - 1) * dilationW + 1; if ((w - (int)paddingW) 
>= 0 && (w - (int)paddingW) < (width - 2 * paddingW) && (h - (int)paddingH) >= 0 && (h - paddingH) < (height - 2 * paddingH)) { // compute the start and end of the output int w_col_start = (w < (int)filterW) ? 0 : (w - int(filterW)) / (int)strideW + 1; int w_col_end = min((int)(w / (int)strideW + 1), (int)(width_col)); int h_col_start = (h < (int)filterH) ? 0 : (h - (int)filterH) / (int)strideH + 1; int h_col_end = min(int(h / strideH + 1), int(height_col)); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { // the col location: [c * width * height + h_out, w_out] int h_k = (h - h_col * strideH); int w_k = (w - w_col * strideW); if (h_k % dilationH == 0 && w_k % dilationW == 0) { h_k /= dilationH; w_k /= dilationW; int c_col = (((c * blockH + h_k) * blockW + w_k) * height_col + h_col) * width_col + w_col; val += data_col[c_col]; } } } h -= paddingH; w -= paddingW; data_im[c * ((width - 2 * paddingW) * (height - 2 * paddingH)) + h * (width - 2 * paddingW) + w] += val; } } } /* * imShape = [inputChannels, inputHeight, inputWidth] * colShape = * [inputChannels, filterHeight, filterWidth, outputHeight, outputWidth] */ template <class T> class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, T> { public: void operator()(T* imData, const TensorShape& imShape, const T* colData, const TensorShape& colShape, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth) { int inputChannels = imShape[0]; int inputHeight = imShape[1]; int inputWidth = imShape[2]; int filterHeight = colShape[1]; int filterWidth = colShape[2]; int outputHeight = colShape[3]; int outputWidth = colShape[4]; size_t numKernels = inputChannels * (inputHeight + 2 * paddingHeight) * (inputWidth + 2 * paddingWidth); size_t blocks = (numKernels + 1024 - 1) / 1024; size_t blockX = 512; size_t blockY = (blocks + 512 - 1) / 512; dim3 threads(1024, 1); dim3 grid(blockX, blockY); // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
col2im<T><<<grid, threads, 0, STREAM_DEFAULT>>>( numKernels, colData, inputHeight + 2 * paddingHeight, inputWidth + 2 * paddingWidth, inputChannels, filterHeight, filterWidth, strideHeight, strideWidth, paddingHeight, paddingWidth, dilationHeight, dilationWidth, outputHeight, outputWidth, imData); CHECK_SYNC("Col2ImFunctor GPU failed"); } }; template class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, float>; template class Im2ColFunctor<kCFO, DEVICE_TYPE_GPU, double>; template class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, float>; template class Col2ImFunctor<kCFO, DEVICE_TYPE_GPU, double>; template <class T> __global__ void im2colOCF(const T* imData, T* colData, int inputChannels, int inputHeight, int inputWidth, int filterHeight, int filterWidth, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth, int outputHeight, int outputWidth) { int swId = blockIdx.x; int shId = blockIdx.y; for (int channelId = threadIdx.z; channelId < inputChannels; channelId += blockDim.z) { for (int idy = threadIdx.y; idy < filterHeight; idy += blockDim.y) { for (int idx = threadIdx.x; idx < filterWidth; idx += blockDim.x) { int widthOffset = idx * dilationHeight + swId * strideWidth - paddingWidth; int heightOffset = idy * dilationWidth + shId * strideHeight - paddingHeight; int imOffset = widthOffset + heightOffset * inputWidth + channelId * inputHeight * inputWidth; int colOffset = idx + idy * filterWidth + channelId * filterHeight * filterWidth + (shId * outputWidth + swId) * (inputChannels * filterHeight * filterWidth); if (heightOffset >= inputHeight || heightOffset < 0 || widthOffset >= inputWidth || widthOffset < 0) { colData[colOffset] = T(0); } else { colData[colOffset] = imData[imOffset]; } } } } } /* * imShape = [inputChannels, inputHeight, inputWidth] * colShape = * [outputHeight, outputWidth, inputChannels, filterHeight, filterWidth] */ template <class T> class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, T> { public: void operator()(const T* imData, const TensorShape& imShape, T* colData, const TensorShape& colShape, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth) { int inputChannels = imShape[0]; int inputHeight = imShape[1]; int inputWidth = imShape[2]; int filterHeight = colShape[3]; int filterWidth = colShape[4]; int outputHeight = colShape[0]; int outputWidth = colShape[1]; int blockDimX = 0; int blockDimY = 0; if (filterHeight <= 4 && filterWidth <= 4) { blockDimX = 4; blockDimY = 4; } else if (filterHeight <= 8 && filterWidth <= 8) { blockDimX = 8; blockDimY = 8; } else if (filterHeight <= 16 && filterWidth <= 16) { blockDimX = 16; blockDimY = 16; } else { blockDimX = 32; blockDimY = 32; } int blockDimZ = 1024 / blockDimX / blockDimY; dim3 threads(blockDimX, blockDimY, std::min(blockDimZ, inputChannels)); dim3 grid(outputWidth, outputHeight); im2colOCF<T><<<grid, threads, 0, STREAM_DEFAULT>>>(imData, colData, inputChannels, inputHeight, inputWidth, filterHeight, filterWidth, strideHeight, strideWidth, paddingHeight, paddingWidth, dilationHeight, dilationWidth, outputHeight, outputWidth); CHECK_SYNC("Im2ColFunctor GPU failed"); } }; template <class T> __global__ void col2imOCF(T* imData, const T* colData, int inputChannels, int inputHeight, int inputWidth, int filterHeight, int filterWidth, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth, int outputHeight, int outputWidth) { int swId = blockIdx.x; int shId = blockIdx.y; for (int 
channelId = threadIdx.z; channelId < inputChannels; channelId += blockDim.z) { for (int idy = threadIdx.y; idy < filterHeight; idy += blockDim.y) { for (int idx = threadIdx.x; idx < filterWidth; idx += blockDim.x) { int widthOffset = idx * dilationWidth + swId * strideWidth - paddingWidth; int heightOffset = idy * dilationHeight + shId * strideHeight - paddingHeight; int imOffset = widthOffset + heightOffset * inputWidth + channelId * inputHeight * inputWidth; int colOffset = idx + idy * filterWidth + channelId * filterHeight * filterWidth + (shId * outputWidth + swId) * (inputChannels * filterHeight * filterWidth); if (heightOffset >= 0 && heightOffset < inputHeight && widthOffset >= 0 && widthOffset < inputWidth) { paddle::paddleAtomicAdd(imData + imOffset, colData[colOffset]); } } } } } /* * imShape = [inputChannels, inputHeight, inputWidth] * colShape = * [outputHeight, outputWidth, inputChannels, filterHeight, filterWidth] */ template <class T> class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, T> { public: void operator()(T* imData, const TensorShape& imShape, const T* colData, const TensorShape& colShape, int strideHeight, int strideWidth, int paddingHeight, int paddingWidth, int dilationHeight, int dilationWidth) { int inputChannels = imShape[0]; int inputHeight = imShape[1]; int inputWidth = imShape[2]; int filterHeight = colShape[3]; int filterWidth = colShape[4]; int outputHeight = colShape[0]; int outputWidth = colShape[1]; int blockDimX = 0; int blockDimY = 0; if (filterHeight <= 4 && filterWidth <= 4) { blockDimX = 4; blockDimY = 4; } else if (filterHeight <= 8 && filterWidth <= 8) { blockDimX = 8; blockDimY = 8; } else if (filterHeight <= 16 && filterWidth <= 16) { blockDimX = 16; blockDimY = 16; } else { blockDimX = 32; blockDimY = 32; } int blockDimZ = 1024 / blockDimX / blockDimY; dim3 threads(blockDimX, blockDimY, std::min(blockDimZ, inputChannels)); dim3 grid(outputWidth, outputHeight); col2imOCF<T><<<grid, threads, 0, STREAM_DEFAULT>>>(imData, colData, inputChannels, inputHeight, inputWidth, filterHeight, filterWidth, strideHeight, strideWidth, paddingHeight, paddingWidth, dilationHeight, dilationWidth, outputHeight, outputWidth); CHECK_SYNC("Col2ImFunctor GPU failed"); } }; template class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, float>; template class Im2ColFunctor<kOCF, DEVICE_TYPE_GPU, double>; template class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, float>; template class Col2ImFunctor<kOCF, DEVICE_TYPE_GPU, double>; } // namespace paddle
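Both the Im2Col and Col2Im functors take outputHeight and outputWidth from colShape rather than recomputing them. For reference, those extents follow the standard dilated-convolution output formula; a small host-side helper that computes one axis (not part of the Paddle sources, added here only as a worked example):

// Output extent of a convolution along one axis, with padding, stride and dilation.
inline int convOutputSize(int inputSize, int filterSize, int stride, int padding, int dilation) {
    int effectiveFilter = dilation * (filterSize - 1) + 1;   // dilated kernel footprint
    return (inputSize + 2 * padding - effectiveFilter) / stride + 1;
}

// e.g. outputHeight = convOutputSize(inputHeight, filterHeight, strideHeight, paddingHeight, dilationHeight);
// e.g. outputWidth  = convOutputSize(inputWidth,  filterWidth,  strideWidth,  paddingWidth,  dilationWidth);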
3c25732aeb733eccc97ed3ccab23c45d32db3e51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define Pipe ThreeElementGPipe<message> #include "threeElementGpipe.cu" #include "testCommon.cu" #define PIPE_NAME "/tmp/myfifo2" void runPipeExperiment(GpuTimer h_timers[NUMBER_OF_REQUESTS], int msg_per_req, bool isConsumer) { // Create reduction array int* d_reductionResults; CUDA_CHECK(hipMalloc((void**)&d_reductionResults, sizeof(message)*NUMBER_OF_THREADS)); // Create messages array message *d_messages; message *h_messages = create_messages(msg_per_req*NUMBER_OF_THREADS); CUDA_CHECK(hipMalloc((void**)&d_messages, sizeof(message)*msg_per_req*NUMBER_OF_THREADS)); CUDA_CHECK(hipMemcpy(d_messages, h_messages, sizeof(message)*msg_per_req*NUMBER_OF_THREADS, hipMemcpyHostToDevice)); // Create Pipe Pipe h_pipe(PIPE_NAME, PIPE_QUEUE_SIZE_IN_MESSAGET, NUMBER_OF_THREADS, isConsumer); h_pipe.init(); Pipe* d_pipe; CUDA_CHECK(hipMalloc((void**)&d_pipe, sizeof(Pipe))); CUDA_CHECK(hipMemcpy(d_pipe, &h_pipe, sizeof(Pipe), hipMemcpyHostToDevice)); // Copy timers to device GpuTimer *d_timers; CUDA_CHECK(hipMalloc((void**)&d_timers, sizeof(GpuTimer)*NUMBER_OF_REQUESTS)); CUDA_CHECK(hipMemcpy(d_timers, h_timers, sizeof(GpuTimer)*NUMBER_OF_REQUESTS, hipMemcpyHostToDevice)); message * d_recived_messages_arr; CUDA_CHECK(hipMalloc((void**)&d_recived_messages_arr, sizeof(message)*msg_per_req*NUMBER_OF_THREADS)); CUDA_CHECK(hipDeviceSynchronize()); // Run kernels if (isConsumer) hipLaunchKernelGGL(( consumer_kernel) , dim3(1), dim3(NUMBER_OF_THREADS), 0, 0, d_pipe, d_messages, msg_per_req, d_timers, d_recived_messages_arr, d_reductionResults); else hipLaunchKernelGGL(( producer_kernel) , dim3(1), dim3(NUMBER_OF_THREADS), 0, 0, d_pipe, d_messages, msg_per_req, d_timers); CUDA_CHECK(hipDeviceSynchronize()); dbg_printf("%s: Finish kernel sync\n", isConsumer ? "Consumer" : "Producer"); h_pipe.gclose(); // Copy results to host CUDA_CHECK(hipMemcpy(h_timers, d_timers, sizeof(GpuTimer)*NUMBER_OF_REQUESTS, hipMemcpyDeviceToHost)); CUDA_CHECK(hipFree(d_pipe)); CUDA_CHECK(hipFree(d_timers)); CUDA_CHECK(hipFree(d_messages)); CUDA_CHECK(hipFree(d_recived_messages_arr)); CUDA_CHECK(hipFree(d_reductionResults)); delete(h_messages); } void RunTest() { int number_of_messages[NUMBER_OF_RUNS] = MASSAGES_PER_REQUEST_ARR; const char * testPipe = "testPipe"; mkfifo(testPipe, 0666); int pid = fork(); bool isConsumer = !(pid == 0); const char* process_name = (isConsumer) ? "consumer" : "producer"; const char* doneMessage = DONE_MSG; char receivedMessage[DONE_MSG_LEN]; int _fd = open(testPipe, (isConsumer) ? 
O_WRONLY : O_RDONLY ); GpuTimer h_Timers[NUMBER_OF_RUNS][NUMBER_OF_REQUESTS]; for (int run_num = 0; run_num < NUMBER_OF_RUNS; run_num++) { printf("%s: Run number %d\n", process_name, run_num + 1); runPipeExperiment(h_Timers[run_num], number_of_messages[run_num], isConsumer); dbg_printf("%s finished run number %d\n", process_name, run_num); // Sync if (isConsumer) { if (write(_fd, (void*)(doneMessage), sizeof(char) * 5) == -1) { perror("Consumer sync write"); exit(-1); } } else { if (read(_fd, (void*)(receivedMessage), sizeof(char) * 5) == -1) { perror("Producer sync read"); exit(-1); } if (strcmp(receivedMessage, doneMessage) != 0) dbg_printf("Error: Something went wrong, message is not done\n"); } } char title[100]; sprintf(title, "Three_Element_Gpipe_%s", process_name); save_results(h_Timers, number_of_messages, title); } int main(int argc, char *argv[]) { SetTestArguments(argc, argv); printf("Start three element GPU pipe test\n"); RunTest(); printf("done\n"); return 0; }
3c25732aeb733eccc97ed3ccab23c45d32db3e51.cu
#define Pipe ThreeElementGPipe<message> #include "threeElementGpipe.cu" #include "testCommon.cu" #define PIPE_NAME "/tmp/myfifo2" void runPipeExperiment(GpuTimer h_timers[NUMBER_OF_REQUESTS], int msg_per_req, bool isConsumer) { // Create reduction array int* d_reductionResults; CUDA_CHECK(cudaMalloc((void**)&d_reductionResults, sizeof(message)*NUMBER_OF_THREADS)); // Create messages array message *d_messages; message *h_messages = create_messages(msg_per_req*NUMBER_OF_THREADS); CUDA_CHECK(cudaMalloc((void**)&d_messages, sizeof(message)*msg_per_req*NUMBER_OF_THREADS)); CUDA_CHECK(cudaMemcpy(d_messages, h_messages, sizeof(message)*msg_per_req*NUMBER_OF_THREADS, cudaMemcpyHostToDevice)); // Create Pipe Pipe h_pipe(PIPE_NAME, PIPE_QUEUE_SIZE_IN_MESSAGET, NUMBER_OF_THREADS, isConsumer); h_pipe.init(); Pipe* d_pipe; CUDA_CHECK(cudaMalloc((void**)&d_pipe, sizeof(Pipe))); CUDA_CHECK(cudaMemcpy(d_pipe, &h_pipe, sizeof(Pipe), cudaMemcpyHostToDevice)); // Copy timers to device GpuTimer *d_timers; CUDA_CHECK(cudaMalloc((void**)&d_timers, sizeof(GpuTimer)*NUMBER_OF_REQUESTS)); CUDA_CHECK(cudaMemcpy(d_timers, h_timers, sizeof(GpuTimer)*NUMBER_OF_REQUESTS, cudaMemcpyHostToDevice)); message * d_recived_messages_arr; CUDA_CHECK(cudaMalloc((void**)&d_recived_messages_arr, sizeof(message)*msg_per_req*NUMBER_OF_THREADS)); CUDA_CHECK(cudaDeviceSynchronize()); // Run kernels if (isConsumer) consumer_kernel <<<1, NUMBER_OF_THREADS, 0>>> (d_pipe, d_messages, msg_per_req, d_timers, d_recived_messages_arr, d_reductionResults); else producer_kernel <<<1, NUMBER_OF_THREADS, 0>>> (d_pipe, d_messages, msg_per_req, d_timers); CUDA_CHECK(cudaDeviceSynchronize()); dbg_printf("%s: Finish kernel sync\n", isConsumer ? "Consumer" : "Producer"); h_pipe.gclose(); // Copy results to host CUDA_CHECK(cudaMemcpy(h_timers, d_timers, sizeof(GpuTimer)*NUMBER_OF_REQUESTS, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaFree(d_pipe)); CUDA_CHECK(cudaFree(d_timers)); CUDA_CHECK(cudaFree(d_messages)); CUDA_CHECK(cudaFree(d_recived_messages_arr)); CUDA_CHECK(cudaFree(d_reductionResults)); delete(h_messages); } void RunTest() { int number_of_messages[NUMBER_OF_RUNS] = MASSAGES_PER_REQUEST_ARR; const char * testPipe = "testPipe"; mkfifo(testPipe, 0666); int pid = fork(); bool isConsumer = !(pid == 0); const char* process_name = (isConsumer) ? "consumer" : "producer"; const char* doneMessage = DONE_MSG; char receivedMessage[DONE_MSG_LEN]; int _fd = open(testPipe, (isConsumer) ? O_WRONLY : O_RDONLY ); GpuTimer h_Timers[NUMBER_OF_RUNS][NUMBER_OF_REQUESTS]; for (int run_num = 0; run_num < NUMBER_OF_RUNS; run_num++) { printf("%s: Run number %d\n", process_name, run_num + 1); runPipeExperiment(h_Timers[run_num], number_of_messages[run_num], isConsumer); dbg_printf("%s finished run number %d\n", process_name, run_num); // Sync if (isConsumer) { if (write(_fd, (void*)(doneMessage), sizeof(char) * 5) == -1) { perror("Consumer sync write"); exit(-1); } } else { if (read(_fd, (void*)(receivedMessage), sizeof(char) * 5) == -1) { perror("Producer sync read"); exit(-1); } if (strcmp(receivedMessage, doneMessage) != 0) dbg_printf("Error: Something went wrong, message is not done\n"); } } char title[100]; sprintf(title, "Three_Element_Gpipe_%s", process_name); save_results(h_Timers, number_of_messages, title); } int main(int argc, char *argv[]) { SetTestArguments(argc, argv); printf("Start three element GPU pipe test\n"); RunTest(); printf("done\n"); return 0; }
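CUDA_CHECK, GpuTimer, message, create_messages and the producer/consumer kernels all come from the included threeElementGpipe.cu and testCommon.cu sources, which are not part of this listing. As an illustration only, a typical definition of an error-checking macro of this kind, assuming it simply reports the failing call and aborts, might look like:

// Hypothetical stand-in for the CUDA_CHECK macro defined in the test's common sources.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)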
26d4ff7239ad7d79aa4ec041c2a80d4c2686ea9a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdlib.h> #include <fstream> #include <sstream> #include <utility> #include <unordered_map> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <chrono> #include <vector> #include <assert.h> #include <math.h> #define NUM_STREAMS 2 struct GPUTimer { GPUTimer() { hipEventCreate(&start_); hipEventCreate(&stop_); hipEventRecord(start_, 0); } ~GPUTimer() { hipEventDestroy(start_); hipEventDestroy(stop_); } void start() { hipEventRecord(start_, 0); } float seconds() { hipEventRecord(stop_, 0); hipEventSynchronize(stop_); float time; hipEventElapsedTime(&time, start_, stop_); return time * 1e-3; } private: hipEvent_t start_, stop_; }; // This is second version of the gpu implementation // This version a general benchmarking to compare with CPU, // Binary operations will be handled single convolution kernel to utilize register memory usage constexpr std::pair<int, int> register_size(8, 4); constexpr int nTPB=256; template <typename T> struct matrix1d { int lenght; T *arr; }; template <typename T> struct matrix2d { int row; int col; T *arr; }; template <typename T> struct matrix3d { int row; int col; int channel; T *arr; }; template <typename T> struct matrix4d{ int row; int col; int channel_in; int channel_out; T *arr; }; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } std::pair<int, int> find_binary_size(std::pair<int, int>input_size, std::pair<int, int>kernel_size){ int size_x = ceil((input_size.first - register_size.first) /static_cast<double>(register_size.first + 1 - kernel_size.first) + 1); int size_y = ceil((input_size.second - register_size.second ) /static_cast<double>(register_size.second + 1 - kernel_size.second) + 1); if (size_x < 0) size_x = 1; if (size_y < 0) size_y = 1; return std::make_pair(size_x, size_y); } size_t choose_block_size(size_t val){ if (val >= nTPB) return nTPB; if (val <= 32) return 32; val = (val >> 1) | val; val = (val >> 2) | val; val = (val >> 4) | val; val = (val >> 8) | val; val = (val >> 16) | val; val++; return val; } void int2binary(float* input_x, const std::pair<int, int> input_index, std::pair<int, int> output_location, unsigned int &output_y, const std::pair<int ,int>register_size, int input_col) { int sign = 0; long int pozitive = 1; long int negative = 0; int count = output_location.second * register_size.second + output_location.first; assert(count < register_size.second * register_size.first); for (int j=0; j<register_size.second; j++) { for(int i=0; i<register_size.first; i++) { sign = (input_x[(input_index.second) * input_col+ input_index.first + i] > 0) - (input_x[(input_index.second) * input_col+ input_index.first + i] < 0); if (sign == 1) { output_y = pozitive<<count | output_y; } else if (sign == -1) { output_y = negative<<count | output_y; } else { output_y = negative<<count |output_y; } if ((input_index.first + i) >= input_col) { break; } count++; } } } void intMat2BinaryMat(float *const& input_mat, unsigned int *const& binary_mat, std::pair<int, int> kernel_size, int input_row, int input_col, int binary_col, int binary_row) { //float * input_mat = input_tensor.arr[i * input_tensor.channel_in + j]; //unsigned int * binary_mat = binary_tensor.arr[i * input_tensor.channel_in 
+ j]; int index_x = 0; int index_y = 0; std::pair<int, int> input_index(0, 0); std::pair<int, int> output_location(0, 0); // Test while(input_row >= input_index.second) { int i = 0; input_index.first = 0; index_x = 0; while(input_col > i) { i = input_index.first + register_size.first; int2binary(input_mat, input_index, output_location, binary_mat[index_y *binary_col + index_x], register_size, input_col); input_index.first = input_index.first + register_size.first + 1 - kernel_size.first; index_x++; } output_location.second++; input_index.second++; if(input_index.second >= input_row) { break; } if (output_location.second % register_size.second == 0) { output_location.second = 0; input_index.second = input_index.second + 1 - kernel_size.second; index_y++; } } } std::pair<int, int> BinaryMatMemoryAllocation( std::pair<int, int> input_size, std::pair<int, int> kernel_size) { int size_x = ceil((input_size.first - register_size.first) /static_cast<double>(register_size.first + 1 - kernel_size.first) + 1); int size_y = ceil((input_size.second - register_size.second ) /static_cast<double>(register_size.second + 1 - kernel_size.second) + 1); if (size_x < 0) size_x = 1; if (size_y < 0) size_y = 1; return std::make_pair(size_x, size_y); } template <typename T> __global__ void compK_matrix(T* InputImageData, T kernel_value, T* outputImageData, int channel_in, int width, int height) { float accum; int col = threadIdx.x + blockIdx.x * blockDim.x; //col index int row = threadIdx.y + blockIdx.y * blockDim.y; //row index int maskRowsRadius = maskRows / 2; int maskColsRadius = maskCols / 2; for (int k = 0; k < channel_in; k++) { //cycle on kernel channels if (row < height && col < width) { accum = 0; int startRow = row - maskRowsRadius; //row index shifted by mask radius int startCol = col - maskColsRadius; //col index shifted by mask radius for (int i = 0; i < maskRows; i++) { //cycle on mask rows for (int j = 0; j < maskCols; j++) { //cycle on mask columns int currentRow = startRow + i; // row index to fetch data from input image int currentCol = startCol + j; // col index to fetch data from input image if (currentRow >= 0 && currentRow < height && currentCol >= 0 && currentCol < width) { accum += InputImageData[(currentRow * width + currentCol) * channel_in + k] * kernel_value; } else accum += 0; } } outputImageData[(row * width + col) * channel_in + k] = accum; } } } void __global__ zeroPadding(float* input_tensor, float* output_tensor, int kernel_row, int kernel_col, int input_col, int input_row, int output_col, int output_row, int output_channel) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int op_buffer = idx / output_col; // simple buffer for same operation int index_x = (idx % output_col) - (kernel_col - 1)/ 2; int index_y = op_buffer%output_row - (kernel_row - 1)/ 2; int index_z = op_buffer / output_row; if (idx< output_row * output_col * output_channel) { if(index_x >= 0 && index_y >= 0 ) { if( index_x < input_col && index_y < input_row ) { output_tensor[idx] = input_tensor[(index_z * input_col * input_row ) + ( index_y * input_col ) + index_x]; } } else { output_tensor[idx] = 0; } } } void __global__ kernel_sum( const unsigned int * d_idata, float * d_odata, const int col, const int row, const int channel_in, const int channel_out) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx < (col * row * channel_out)) { int tidx = idx%(col*row) + ((idx/(col*row) ) *(col * row * channel_in) ); // indexing for 4 dim , since kernel must sum values with same channel out int tsum = 0; #pragma 
unroll for (int i = 0; i < channel_in; i++) { tsum += d_idata[tidx]; tidx += row * col; } d_odata[idx] = static_cast<float>(tsum);// / static_cast<float>(channel_in); } } template<typename T> __device__ void to_binary_register( const T &idata, unsigned int &odata, int *output_location) { int sign = (idata > 0) - (idata < 0); const unsigned int pozitive = 1; const unsigned int negative = 0; //int count = output_location[1] * register_size.second + output_location[0]; //assert(count < register_size.second * register_size.first); if (sign > -1) { odata = pozitive<<(output_location[1] * register_size.first + output_location[0]) | odata; } else { odata = negative<<(output_location[1] * register_size.first + output_location[0]) | odata; } } template<typename T> void __global__ convert2binary( const T * d_idata, unsigned int * d_odata, const int row, const int b_row, const int col, const int b_col, const int channel, const int kernel_row = 3, const int kernel_col = 3) { // Each thread will store a size = 32 array inside their single register int idx = threadIdx.x+blockDim.x*blockIdx.x; //register IDX // n*(regsiter_size - kernel_size) if (idx < (b_row * b_col * channel)) { int input_index[] = {(idx%b_col) * (register_size.first - kernel_col), ((idx/b_col) % b_row)* (register_size.second - kernel_row), (idx/(b_col * b_row) )}; // x, y ,z int data_idx = input_index[0] + (input_index[1] * col) + (input_index[2] * row * col); //int input_index[] = {data_idx%row, data_idx/col, data_idx/(row*col)}; // from start of array , (x, y, z) int register_location[] = {0, 0}; unsigned int local_register = 0; for (int j=0; register_size.second>j; j++) { for (int i=0; register_size.first>i; i++) { to_binary_register<T>(d_idata[data_idx], local_register, register_location); ++data_idx; input_index[0] += 1; register_location[0] += 1; if (input_index[0] == col) break; } data_idx = data_idx + col - register_location[0]; input_index[1] += 1; input_index[0] = (idx%b_col) * (register_size.first - kernel_col); register_location[0] = 0; register_location[1] += 1; if (input_index[1] == row) break; } d_odata[idx] = local_register; } } template<typename T> void __global__ scalar_multiplication(T* __restrict__ d_idata, const T __restrict__ scalar, const int height, const int width) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx<height * width) { d_idata[idx] = d_idata[idx] * scalar; } } void __global__ scaling_result(T* __restrict__ d_idata, const T* __restrict__ d_scalar, const int height, const int width, const int channel_out) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx<height * width * channel_out) { d_idata[idx] = d_idata[idx] * d_scalar[idx%(height * width)]; } } void __device__ binary2int(const unsigned int idata, unsigned int &odata, int kernel_row, int kernel_col) { constexpr unsigned int mask = 1; unsigned int shifter = 0; unsigned int buffer = 0; for (int j=0; kernel_row>j; ++j) { for(int i=0; kernel_col>i; ++i) { buffer += (idata >> shifter) & mask; ++shifter; } shifter += register_size.first - kernel_col; } odata = 2 * buffer - (kernel_row * kernel_col); } void __global__ binaryConv2d( const unsigned int * input_tensor, unsigned int * output_tensor, const unsigned int * weight_tensor, int input_row, int input_col, int kernel_row, int kernel_col, int output_row, int output_col, int channel_in, int channel_out ) { int idx = threadIdx.x +blockDim.x*blockIdx.x; int conv_per_row = register_size.second - (kernel_row - 1); int conv_per_column = register_size.first - (kernel_col - 1); int output_index_x 
= (idx % input_col) * conv_per_column; int output_index_y = ((idx / input_col) % input_row) * conv_per_row; if (idx < input_row * input_col * channel_in * channel_out) { unsigned int register_buffer = input_tensor[idx % (input_row * input_col * channel_in)]; if ( (output_index_x + conv_per_column) > output_col) { conv_per_column = output_col - output_index_x; } if ( (output_index_y + conv_per_row) > output_row) { conv_per_row = output_row - output_index_y; } unsigned int mask = ::pow(2, kernel_col) - 1; for (int j=1; kernel_row > j; j++) { mask = (mask<<register_size.first) | static_cast<unsigned int>(::pow(2, kernel_col) - 1); } int default_index = (idx / (input_row * input_col) ) * (output_col * output_row); auto weight_index = idx / (input_row * input_col); unsigned int shifter = 0; for (int j=0; conv_per_row>j; ++j) { for (int i=0; conv_per_column>i; ++i) { unsigned int buffer = (~(register_buffer>>shifter) ^ (weight_tensor[weight_index]) ) & mask; binary2int(buffer, output_tensor[default_index + (output_index_y+j)*output_col + output_index_x + i], kernel_row, kernel_col); ++shifter; } // Check if register is not fully filled, // if not add shifter the missing shift amount shifter += register_size.second - conv_per_column; } } } // This part must be updated to concurrent execution void xnor_convolution(matrix3d<float> &h_input_tensor, matrix4d<unsigned int> &h_weight_tensor, matrix3d<float> &h_output_tensor, const float alpha, int kernel_row, int kernel_col, bool padding=true) { hipEvent_t start, stop; hipEvent_t start1, stop1; hipEvent_t start2, stop2; hipEventCreate(&start2); hipEventCreate(&stop2); hipEventCreate(&start); hipEventCreate(&stop); hipEventCreate(&start1); hipEventCreate(&stop1); matrix3d<float> d_input_tensor; d_input_tensor.col = h_input_tensor.col; d_input_tensor.row = h_input_tensor.row; d_input_tensor.channel = h_input_tensor.channel; auto copy_size = sizeof(float) * d_input_tensor.col* d_input_tensor.row * d_input_tensor.channel; hipMalloc((void **)&d_input_tensor.arr, copy_size); hipMemcpy(d_input_tensor.arr, h_input_tensor.arr, copy_size, hipMemcpyHostToDevice); // // Calculate K matrix // Use async steam2 hipStream_t stream1; hipStreamCreate(&stream1); matrix2d<float> d_K_matrix; d_K_matrix.col = h_input_tensor.col; d_K_matrix.row = h_input_tensor.row; copy_size = sizeof(float) * d_K_matrix.col* d_K_matrix.row; hipMalloc((void **)&d_K_matrix.arr, copy_size); const float kernel_value = 1.0 / static_cast<float>(h_weight_tensor.row * h_weight_tensor.col); auto block_size = choose_block_size(h_input_tensor.row * h_input_tensor.col); auto grid_size = (h_input_tensor.row * h_input_tensor.col+ block_size - 1)/block_size; hipLaunchKernelGGL(( compK_matrix<float>), dim3(grid_size), dim3(block_size), stream1, 0, d_input_tensor.arr, kernel_value, d_K_matrix.arr, d_input_tensor.channel, d_input_tensor.width, d_input_tensor.height); // hipLaunchKernelGGL(( scalar_multiplication<float>), dim3(grid_size), dim3(block_size), stream1, 0, d_K_matrix.arr, alpha, height, width); matrix3d<float> d_padded_input_tensor; d_padded_input_tensor.row = h_input_tensor.row + kernel_row - 1; d_padded_input_tensor.col = h_input_tensor.col + kernel_col - 1; d_padded_input_tensor.channel = h_input_tensor.channel; copy_size = sizeof(float) * d_padded_input_tensor.row * d_padded_input_tensor.col * d_padded_input_tensor.channel; gpuErrchk(hipMalloc((void **)&d_padded_input_tensor.arr, copy_size)); block_size = choose_block_size(d_padded_input_tensor.row * d_padded_input_tensor.col * 
d_padded_input_tensor.channel); grid_size = (d_padded_input_tensor.row * d_padded_input_tensor.col * d_padded_input_tensor.channel + block_size - 1)/block_size; hipLaunchKernelGGL(( zeroPadding), dim3(grid_size), dim3(block_size), 0, 0, d_input_tensor.arr, d_padded_input_tensor.arr, kernel_row, kernel_col, d_input_tensor.col, d_input_tensor.row, d_padded_input_tensor.row, d_padded_input_tensor.col, d_padded_input_tensor.channel); //hipFree(d_input_tensor.arr); auto binary_size = find_binary_size(std::make_pair(h_input_tensor.col, h_input_tensor.row), std::make_pair(kernel_col, kernel_row)); matrix3d<unsigned int> d_binary_input_tensor; d_binary_input_tensor.row = binary_size.second; d_binary_input_tensor.col = binary_size.first; d_binary_input_tensor.channel = d_padded_input_tensor.channel; copy_size = sizeof(unsigned int) * d_binary_input_tensor.row * d_binary_input_tensor.col * d_binary_input_tensor.channel; gpuErrchk(hipMalloc((void **)&d_binary_input_tensor.arr, copy_size)); hipEventRecord(start, 0); hipLaunchKernelGGL(( convert2binary), dim3(grid_size), dim3(block_size), 0, 0, d_padded_input_tensor.arr, d_binary_input_tensor.arr, d_padded_input_tensor.row, d_binary_input_tensor.row, d_padded_input_tensor.col, d_binary_input_tensor.col, d_binary_input_tensor.channel, kernel_row, kernel_col); hipEventRecord(stop, 0); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout<<"Int2Binary Time= "<< milliseconds<<std::endl; //hipFree(d_padded_input_tensor.arr); matrix4d<unsigned int> d_convolution_buffer; d_convolution_buffer.col = h_input_tensor.col; d_convolution_buffer.row = h_input_tensor.row; d_convolution_buffer.channel_in = h_input_tensor.channel; d_convolution_buffer.channel_out = h_weight_tensor.channel_out; copy_size = sizeof(unsigned int) * d_convolution_buffer.col * d_convolution_buffer.row * d_convolution_buffer.channel_in * d_convolution_buffer.channel_out; gpuErrchk(hipMalloc((void **)& d_convolution_buffer.arr, copy_size)); matrix4d<unsigned int> d_weight_tensor; d_weight_tensor.row = h_weight_tensor.row; d_weight_tensor.col = h_weight_tensor.col; d_weight_tensor.channel_in = h_weight_tensor.channel_in; d_weight_tensor.channel_out = h_weight_tensor.channel_out; copy_size = sizeof(unsigned int) * d_weight_tensor.row *d_weight_tensor.col * d_weight_tensor.channel_in * d_weight_tensor.channel_out; gpuErrchk(hipMalloc((void**)&d_weight_tensor.arr, copy_size)); // pinned memory can be tested hipMemcpy(d_weight_tensor.arr, h_weight_tensor.arr, copy_size, hipMemcpyHostToDevice); block_size = choose_block_size(d_convolution_buffer.col * d_convolution_buffer.row * d_convolution_buffer.channel_in * d_convolution_buffer.channel_out); grid_size = (d_convolution_buffer.col* d_convolution_buffer.row * d_convolution_buffer.channel_in * d_convolution_buffer.channel_out + block_size - 1)/ block_size; hipEventRecord(start1, 0); hipLaunchKernelGGL(( binaryConv2d), dim3(grid_size), dim3(block_size), 0, 0, d_binary_input_tensor.arr, d_convolution_buffer.arr, d_weight_tensor.arr ,d_binary_input_tensor.row, d_binary_input_tensor.col , kernel_row, kernel_col ,d_convolution_buffer.row, d_convolution_buffer.col ,d_convolution_buffer.channel_in, d_convolution_buffer.channel_out ); hipEventRecord(stop1, 0); hipEventSynchronize(stop1); hipEventElapsedTime(&milliseconds, start1, stop1); std::cout<<"Convolution Time= "<< milliseconds<<std::endl; hipFree(d_binary_input_tensor.arr); matrix3d<float> d_output_tensor; d_output_tensor.col = 
h_output_tensor.col; d_output_tensor.row = h_output_tensor.row; d_output_tensor.channel = h_output_tensor.channel; copy_size = sizeof(float) * d_output_tensor.row * d_output_tensor.col * d_output_tensor.channel; hipMalloc((void**)&d_output_tensor.arr, copy_size); block_size = choose_block_size(d_output_tensor.row * d_output_tensor.col * d_output_tensor.channel); grid_size = (d_output_tensor.row * d_output_tensor.col * d_output_tensor.channel + block_size - 1) / block_size; hipEventRecord(start2, 0); hipLaunchKernelGGL(( kernel_sum), dim3(grid_size), dim3(block_size), 0, 0, d_convolution_buffer.arr, d_output_tensor.arr, d_output_tensor.col, d_output_tensor.row, d_convolution_buffer.channel_in, d_convolution_buffer.channel_out); hipEventRecord(stop2, 0); hipEventSynchronize(stop2); hipEventElapsedTime(&milliseconds, start2, stop2); std::cout<<"Summation Time= "<< milliseconds<<std::endl; hipDeviceSynchronize() hipStreamDestroy(stream1); // Multiplication with K and alpha //scaling_result<<<>>>(); //hipFree(d_convolution_buffer.arr); hipMemcpy(h_output_tensor.arr, d_output_tensor.arr, copy_size, hipMemcpyDeviceToHost); //hipFree(d_output_tensor.arr); hipEventDestroy(start); hipEventDestroy(stop); hipEventDestroy(start1); hipEventDestroy(stop1); hipEventDestroy(start2); hipEventDestroy(stop2); return; } int main() { int row = 512; int col = 512; int kernel_row = 3; int kernel_col = 3; int channel_in = 1; int channel_out = 1; matrix3d<float> input_tensor; matrix4d<float> weight_tensor; input_tensor.row = row; input_tensor.col = col; input_tensor.channel = channel_in; // Init Matrices input_tensor.arr = new float [input_tensor.channel * input_tensor.row * input_tensor.col]; weight_tensor.row = kernel_row; weight_tensor.col = kernel_col; weight_tensor.channel_in = channel_in; weight_tensor.channel_out = channel_out; weight_tensor.arr = new float [weight_tensor.channel_in * weight_tensor.channel_out * weight_tensor.row * weight_tensor.col]; bool padding = true; // Default Values for(int i=0; input_tensor.channel > i; ++i) { for (int j=0; input_tensor.col * input_tensor.row> j; ++j) { input_tensor.arr[i * input_tensor.col * input_tensor.row + j] = (rand() % 50) - 25; } } for(int i=0; weight_tensor.channel_in * weight_tensor.channel_out > i; ++i) { for (int j=0; weight_tensor.col * weight_tensor.row> j; ++j) { weight_tensor.arr[i * weight_tensor.col * weight_tensor.row + j] = (rand() % 50) -25; } } // Make Weights binary as preProcessing auto weight_size = BinaryMatMemoryAllocation(std::make_pair(weight_tensor.row, weight_tensor.col), std::make_pair(weight_tensor.col, weight_tensor.row)); matrix4d<unsigned int> binary_weight_tensor; binary_weight_tensor.col = weight_size.first; binary_weight_tensor.row = weight_size.second; binary_weight_tensor.channel_in = weight_tensor.channel_in; binary_weight_tensor.channel_out = weight_tensor.channel_out; binary_weight_tensor.arr = new unsigned int [binary_weight_tensor.channel_in * binary_weight_tensor.channel_out *binary_weight_tensor.row * binary_weight_tensor.col]; for (int i= 0; weight_tensor.channel_out > i; ++i) { for(int j=0; weight_tensor.channel_in > j; ++j) { intMat2BinaryMat(&weight_tensor.arr[(i * weight_tensor.channel_in + j) * weight_tensor.row * weight_tensor.col], &binary_weight_tensor.arr[i * weight_tensor.channel_in + j], std::make_pair(weight_tensor.col, weight_tensor.row), weight_tensor.row, weight_tensor.col, binary_weight_tensor.col, binary_weight_tensor.row); } } delete weight_tensor.arr; // A sample layer matrix3d<float> output_tensor; 
output_tensor.col = input_tensor.col; output_tensor.row = input_tensor.row; output_tensor.channel = input_tensor.channel; output_tensor.arr = new float [input_tensor.col* input_tensor.row * input_tensor.channel]; xnor_convolution(input_tensor, binary_weight_tensor, output_tensor, weight_tensor.row, weight_tensor.col ,padding); delete[] input_tensor.arr; delete[] binary_weight_tensor.arr; delete[] output_tensor.arr; return 0; }
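One thing to note when reading compK_matrix in this file: it indexes with maskRows and maskCols, yet neither identifier is declared anywhere in the listing, so the translation unit as shown would not compile. They are presumably compile-time constants matching the averaging-mask extent; a minimal guess at the missing definitions, chosen to match the 3x3 kernel_row/kernel_col used in main() (the real project may define them elsewhere):

// Hypothetical definitions for the mask extent read by compK_matrix.
constexpr int maskRows = 3;
constexpr int maskCols = 3;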
26d4ff7239ad7d79aa4ec041c2a80d4c2686ea9a.cu
#include <iostream> #include <stdlib.h> #include <fstream> #include <sstream> #include <utility> #include <unordered_map> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <chrono> #include <vector> #include <assert.h> #include <math.h> #define NUM_STREAMS 2 struct GPUTimer { GPUTimer() { cudaEventCreate(&start_); cudaEventCreate(&stop_); cudaEventRecord(start_, 0); } ~GPUTimer() { cudaEventDestroy(start_); cudaEventDestroy(stop_); } void start() { cudaEventRecord(start_, 0); } float seconds() { cudaEventRecord(stop_, 0); cudaEventSynchronize(stop_); float time; cudaEventElapsedTime(&time, start_, stop_); return time * 1e-3; } private: cudaEvent_t start_, stop_; }; // This is second version of the gpu implementation // This version a general benchmarking to compare with CPU, // Binary operations will be handled single convolution kernel to utilize register memory usage constexpr std::pair<int, int> register_size(8, 4); constexpr int nTPB=256; template <typename T> struct matrix1d { int lenght; T *arr; }; template <typename T> struct matrix2d { int row; int col; T *arr; }; template <typename T> struct matrix3d { int row; int col; int channel; T *arr; }; template <typename T> struct matrix4d{ int row; int col; int channel_in; int channel_out; T *arr; }; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } std::pair<int, int> find_binary_size(std::pair<int, int>input_size, std::pair<int, int>kernel_size){ int size_x = ceil((input_size.first - register_size.first) /static_cast<double>(register_size.first + 1 - kernel_size.first) + 1); int size_y = ceil((input_size.second - register_size.second ) /static_cast<double>(register_size.second + 1 - kernel_size.second) + 1); if (size_x < 0) size_x = 1; if (size_y < 0) size_y = 1; return std::make_pair(size_x, size_y); } size_t choose_block_size(size_t val){ if (val >= nTPB) return nTPB; if (val <= 32) return 32; val = (val >> 1) | val; val = (val >> 2) | val; val = (val >> 4) | val; val = (val >> 8) | val; val = (val >> 16) | val; val++; return val; } void int2binary(float* input_x, const std::pair<int, int> input_index, std::pair<int, int> output_location, unsigned int &output_y, const std::pair<int ,int>register_size, int input_col) { int sign = 0; long int pozitive = 1; long int negative = 0; int count = output_location.second * register_size.second + output_location.first; assert(count < register_size.second * register_size.first); for (int j=0; j<register_size.second; j++) { for(int i=0; i<register_size.first; i++) { sign = (input_x[(input_index.second) * input_col+ input_index.first + i] > 0) - (input_x[(input_index.second) * input_col+ input_index.first + i] < 0); if (sign == 1) { output_y = pozitive<<count | output_y; } else if (sign == -1) { output_y = negative<<count | output_y; } else { output_y = negative<<count |output_y; } if ((input_index.first + i) >= input_col) { break; } count++; } } } void intMat2BinaryMat(float *const& input_mat, unsigned int *const& binary_mat, std::pair<int, int> kernel_size, int input_row, int input_col, int binary_col, int binary_row) { //float * input_mat = input_tensor.arr[i * input_tensor.channel_in + j]; //unsigned int * binary_mat = binary_tensor.arr[i * input_tensor.channel_in + j]; int index_x = 0; int index_y = 0; std::pair<int, int> 
input_index(0, 0); std::pair<int, int> output_location(0, 0); // Test while(input_row >= input_index.second) { int i = 0; input_index.first = 0; index_x = 0; while(input_col > i) { i = input_index.first + register_size.first; int2binary(input_mat, input_index, output_location, binary_mat[index_y *binary_col + index_x], register_size, input_col); input_index.first = input_index.first + register_size.first + 1 - kernel_size.first; index_x++; } output_location.second++; input_index.second++; if(input_index.second >= input_row) { break; } if (output_location.second % register_size.second == 0) { output_location.second = 0; input_index.second = input_index.second + 1 - kernel_size.second; index_y++; } } } std::pair<int, int> BinaryMatMemoryAllocation( std::pair<int, int> input_size, std::pair<int, int> kernel_size) { int size_x = ceil((input_size.first - register_size.first) /static_cast<double>(register_size.first + 1 - kernel_size.first) + 1); int size_y = ceil((input_size.second - register_size.second ) /static_cast<double>(register_size.second + 1 - kernel_size.second) + 1); if (size_x < 0) size_x = 1; if (size_y < 0) size_y = 1; return std::make_pair(size_x, size_y); } template <typename T> __global__ void compK_matrix(T* InputImageData, T kernel_value, T* outputImageData, int channel_in, int width, int height, int maskRows, int maskCols /* averaging window (weight kernel) dimensions */) { float accum; int col = threadIdx.x + blockIdx.x * blockDim.x; //col index int row = threadIdx.y + blockIdx.y * blockDim.y; //row index int maskRowsRadius = maskRows / 2; int maskColsRadius = maskCols / 2; for (int k = 0; k < channel_in; k++) { //cycle on kernel channels if (row < height && col < width) { accum = 0; int startRow = row - maskRowsRadius; //row index shifted by mask radius int startCol = col - maskColsRadius; //col index shifted by mask radius for (int i = 0; i < maskRows; i++) { //cycle on mask rows for (int j = 0; j < maskCols; j++) { //cycle on mask columns int currentRow = startRow + i; // row index to fetch data from input image int currentCol = startCol + j; // col index to fetch data from input image if (currentRow >= 0 && currentRow < height && currentCol >= 0 && currentCol < width) { accum += InputImageData[(currentRow * width + currentCol) * channel_in + k] * kernel_value; } else accum += 0; } } outputImageData[(row * width + col) * channel_in + k] = accum; } } } void __global__ zeroPadding(float* input_tensor, float* output_tensor, int kernel_row, int kernel_col, int input_col, int input_row, int output_col, int output_row, int output_channel) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int op_buffer = idx / output_col; // simple buffer for same operation int index_x = (idx % output_col) - (kernel_col - 1)/ 2; int index_y = op_buffer%output_row - (kernel_row - 1)/ 2; int index_z = op_buffer / output_row; if (idx< output_row * output_col * output_channel) { if(index_x >= 0 && index_y >= 0 ) { if( index_x < input_col && index_y < input_row ) { output_tensor[idx] = input_tensor[(index_z * input_col * input_row ) + ( index_y * input_col ) + index_x]; } } else { output_tensor[idx] = 0; } } } void __global__ kernel_sum( const unsigned int * d_idata, float * d_odata, const int col, const int row, const int channel_in, const int channel_out) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx < (col * row * channel_out)) { int tidx = idx%(col*row) + ((idx/(col*row) ) *(col * row * channel_in) ); // indexing for 4 dim , since kernel must sum values with same channel out int tsum = 0; #pragma unroll for (int i = 0; i < channel_in; i++) { tsum +=
d_idata[tidx]; tidx += row * col; } d_odata[idx] = static_cast<float>(tsum);// / static_cast<float>(channel_in); } } template<typename T> __device__ void to_binary_register( const T &idata, unsigned int &odata, int *output_location) { int sign = (idata > 0) - (idata < 0); const unsigned int pozitive = 1; const unsigned int negative = 0; //int count = output_location[1] * register_size.second + output_location[0]; //assert(count < register_size.second * register_size.first); if (sign > -1) { odata = pozitive<<(output_location[1] * register_size.first + output_location[0]) | odata; } else { odata = negative<<(output_location[1] * register_size.first + output_location[0]) | odata; } } template<typename T> void __global__ convert2binary( const T * d_idata, unsigned int * d_odata, const int row, const int b_row, const int col, const int b_col, const int channel, const int kernel_row = 3, const int kernel_col = 3) { // Each thread will store a size = 32 array inside their single register int idx = threadIdx.x+blockDim.x*blockIdx.x; //register IDX // n*(register_size - kernel_size) if (idx < (b_row * b_col * channel)) { int input_index[] = {(idx%b_col) * (register_size.first - kernel_col), ((idx/b_col) % b_row)* (register_size.second - kernel_row), (idx/(b_col * b_row) )}; // x, y ,z int data_idx = input_index[0] + (input_index[1] * col) + (input_index[2] * row * col); //int input_index[] = {data_idx%row, data_idx/col, data_idx/(row*col)}; // from start of array , (x, y, z) int register_location[] = {0, 0}; unsigned int local_register = 0; for (int j=0; register_size.second>j; j++) { for (int i=0; register_size.first>i; i++) { to_binary_register<T>(d_idata[data_idx], local_register, register_location); ++data_idx; input_index[0] += 1; register_location[0] += 1; if (input_index[0] == col) break; } data_idx = data_idx + col - register_location[0]; input_index[1] += 1; input_index[0] = (idx%b_col) * (register_size.first - kernel_col); register_location[0] = 0; register_location[1] += 1; if (input_index[1] == row) break; } d_odata[idx] = local_register; } } template<typename T> void __global__ scalar_multiplication(T* __restrict__ d_idata, const T scalar, const int height, const int width) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx<height * width) { d_idata[idx] = d_idata[idx] * scalar; } } template<typename T> void __global__ scaling_result(T* __restrict__ d_idata, const T* __restrict__ d_scalar, const int height, const int width, const int channel_out) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx<height * width * channel_out) { d_idata[idx] = d_idata[idx] * d_scalar[idx%(height * width)]; } } void __device__ binary2int(const unsigned int idata, unsigned int &odata, int kernel_row, int kernel_col) { constexpr unsigned int mask = 1; unsigned int shifter = 0; unsigned int buffer = 0; for (int j=0; kernel_row>j; ++j) { for(int i=0; kernel_col>i; ++i) { buffer += (idata >> shifter) & mask; ++shifter; } shifter += register_size.first - kernel_col; } odata = 2 * buffer - (kernel_row * kernel_col); } void __global__ binaryConv2d( const unsigned int * input_tensor, unsigned int * output_tensor, const unsigned int * weight_tensor, int input_row, int input_col, int kernel_row, int kernel_col, int output_row, int output_col, int channel_in, int channel_out ) { int idx = threadIdx.x +blockDim.x*blockIdx.x; int conv_per_row = register_size.second - (kernel_row - 1); int conv_per_column = register_size.first - (kernel_col - 1); int output_index_x = (idx % input_col) * conv_per_column; int
output_index_y = ((idx / input_col) % input_row) * conv_per_row; if (idx < input_row * input_col * channel_in * channel_out) { unsigned int register_buffer = input_tensor[idx % (input_row * input_col * channel_in)]; if ( (output_index_x + conv_per_column) > output_col) { conv_per_column = output_col - output_index_x; } if ( (output_index_y + conv_per_row) > output_row) { conv_per_row = output_row - output_index_y; } unsigned int mask = (1u << kernel_col) - 1; /* kernel_col ones in the low bits */ for (int j=1; kernel_row > j; j++) { mask = (mask<<register_size.first) | ((1u << kernel_col) - 1); } int default_index = (idx / (input_row * input_col) ) * (output_col * output_row); auto weight_index = idx / (input_row * input_col); unsigned int shifter = 0; for (int j=0; conv_per_row>j; ++j) { for (int i=0; conv_per_column>i; ++i) { unsigned int buffer = (~(register_buffer>>shifter) ^ (weight_tensor[weight_index]) ) & mask; binary2int(buffer, output_tensor[default_index + (output_index_y+j)*output_col + output_index_x + i], kernel_row, kernel_col); ++shifter; } // Check if register is not fully filled, // if not add shifter the missing shift amount shifter += register_size.second - conv_per_column; } } } // This part must be updated to concurrent execution void xnor_convolution(matrix3d<float> &h_input_tensor, matrix4d<unsigned int> &h_weight_tensor, matrix3d<float> &h_output_tensor, const float alpha, int kernel_row, int kernel_col, bool padding=true) { cudaEvent_t start, stop; cudaEvent_t start1, stop1; cudaEvent_t start2, stop2; cudaEventCreate(&start2); cudaEventCreate(&stop2); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&start1); cudaEventCreate(&stop1); matrix3d<float> d_input_tensor; d_input_tensor.col = h_input_tensor.col; d_input_tensor.row = h_input_tensor.row; d_input_tensor.channel = h_input_tensor.channel; auto copy_size = sizeof(float) * d_input_tensor.col* d_input_tensor.row * d_input_tensor.channel; cudaMalloc((void **)&d_input_tensor.arr, copy_size); cudaMemcpy(d_input_tensor.arr, h_input_tensor.arr, copy_size, cudaMemcpyHostToDevice); // // Calculate K matrix // Use async stream1 cudaStream_t stream1; cudaStreamCreate(&stream1); matrix2d<float> d_K_matrix; d_K_matrix.col = h_input_tensor.col; d_K_matrix.row = h_input_tensor.row; copy_size = sizeof(float) * d_K_matrix.col* d_K_matrix.row; cudaMalloc((void **)&d_K_matrix.arr, copy_size); const float kernel_value = 1.0 / static_cast<float>(h_weight_tensor.row * h_weight_tensor.col); auto block_size = choose_block_size(h_input_tensor.row * h_input_tensor.col); auto grid_size = (h_input_tensor.row * h_input_tensor.col+ block_size - 1)/block_size; compK_matrix<float><<<grid_size, block_size, 0, stream1>>>(d_input_tensor.arr, kernel_value, d_K_matrix.arr, d_input_tensor.channel, d_input_tensor.col, d_input_tensor.row, h_weight_tensor.row, h_weight_tensor.col); // scalar_multiplication<float><<<grid_size, block_size, 0, stream1>>>(d_K_matrix.arr, alpha, height, width); matrix3d<float> d_padded_input_tensor; d_padded_input_tensor.row = h_input_tensor.row + kernel_row - 1; d_padded_input_tensor.col = h_input_tensor.col + kernel_col - 1; d_padded_input_tensor.channel = h_input_tensor.channel; copy_size = sizeof(float) * d_padded_input_tensor.row * d_padded_input_tensor.col * d_padded_input_tensor.channel; gpuErrchk(cudaMalloc((void **)&d_padded_input_tensor.arr, copy_size)); block_size = choose_block_size(d_padded_input_tensor.row * d_padded_input_tensor.col * d_padded_input_tensor.channel); grid_size = (d_padded_input_tensor.row * d_padded_input_tensor.col *
d_padded_input_tensor.channel + block_size - 1)/block_size; zeroPadding<<<grid_size, block_size>>>(d_input_tensor.arr, d_padded_input_tensor.arr, kernel_row, kernel_col, d_input_tensor.col, d_input_tensor.row, d_padded_input_tensor.row, d_padded_input_tensor.col, d_padded_input_tensor.channel); //cudaFree(d_input_tensor.arr); auto binary_size = find_binary_size(std::make_pair(h_input_tensor.col, h_input_tensor.row), std::make_pair(kernel_col, kernel_row)); matrix3d<unsigned int> d_binary_input_tensor; d_binary_input_tensor.row = binary_size.second; d_binary_input_tensor.col = binary_size.first; d_binary_input_tensor.channel = d_padded_input_tensor.channel; copy_size = sizeof(unsigned int) * d_binary_input_tensor.row * d_binary_input_tensor.col * d_binary_input_tensor.channel; gpuErrchk(cudaMalloc((void **)&d_binary_input_tensor.arr, copy_size)); cudaEventRecord(start, 0); convert2binary<<<grid_size, block_size>>>(d_padded_input_tensor.arr, d_binary_input_tensor.arr, d_padded_input_tensor.row, d_binary_input_tensor.row, d_padded_input_tensor.col, d_binary_input_tensor.col, d_binary_input_tensor.channel, kernel_row, kernel_col); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout<<"Int2Binary Time= "<< milliseconds<<std::endl; //cudaFree(d_padded_input_tensor.arr); matrix4d<unsigned int> d_convolution_buffer; d_convolution_buffer.col = h_input_tensor.col; d_convolution_buffer.row = h_input_tensor.row; d_convolution_buffer.channel_in = h_input_tensor.channel; d_convolution_buffer.channel_out = h_weight_tensor.channel_out; copy_size = sizeof(unsigned int) * d_convolution_buffer.col * d_convolution_buffer.row * d_convolution_buffer.channel_in * d_convolution_buffer.channel_out; gpuErrchk(cudaMalloc((void **)& d_convolution_buffer.arr, copy_size)); matrix4d<unsigned int> d_weight_tensor; d_weight_tensor.row = h_weight_tensor.row; d_weight_tensor.col = h_weight_tensor.col; d_weight_tensor.channel_in = h_weight_tensor.channel_in; d_weight_tensor.channel_out = h_weight_tensor.channel_out; copy_size = sizeof(unsigned int) * d_weight_tensor.row *d_weight_tensor.col * d_weight_tensor.channel_in * d_weight_tensor.channel_out; gpuErrchk(cudaMalloc((void**)&d_weight_tensor.arr, copy_size)); // pinned memory can be tested cudaMemcpy(d_weight_tensor.arr, h_weight_tensor.arr, copy_size, cudaMemcpyHostToDevice); block_size = choose_block_size(d_convolution_buffer.col * d_convolution_buffer.row * d_convolution_buffer.channel_in * d_convolution_buffer.channel_out); grid_size = (d_convolution_buffer.col* d_convolution_buffer.row * d_convolution_buffer.channel_in * d_convolution_buffer.channel_out + block_size - 1)/ block_size; cudaEventRecord(start1, 0); binaryConv2d<<<grid_size, block_size>>>(d_binary_input_tensor.arr, d_convolution_buffer.arr, d_weight_tensor.arr ,d_binary_input_tensor.row, d_binary_input_tensor.col , kernel_row, kernel_col ,d_convolution_buffer.row, d_convolution_buffer.col ,d_convolution_buffer.channel_in, d_convolution_buffer.channel_out ); cudaEventRecord(stop1, 0); cudaEventSynchronize(stop1); cudaEventElapsedTime(&milliseconds, start1, stop1); std::cout<<"Convolution Time= "<< milliseconds<<std::endl; cudaFree(d_binary_input_tensor.arr); matrix3d<float> d_output_tensor; d_output_tensor.col = h_output_tensor.col; d_output_tensor.row = h_output_tensor.row; d_output_tensor.channel = h_output_tensor.channel; copy_size = sizeof(float) * d_output_tensor.row * d_output_tensor.col * d_output_tensor.channel; 
cudaMalloc((void**)&d_output_tensor.arr, copy_size); block_size = choose_block_size(d_output_tensor.row * d_output_tensor.col * d_output_tensor.channel); grid_size = (d_output_tensor.row * d_output_tensor.col * d_output_tensor.channel + block_size - 1) / block_size; cudaEventRecord(start2, 0); kernel_sum<<<grid_size, block_size>>>(d_convolution_buffer.arr, d_output_tensor.arr, d_output_tensor.col, d_output_tensor.row, d_convolution_buffer.channel_in, d_convolution_buffer.channel_out); cudaEventRecord(stop2, 0); cudaEventSynchronize(stop2); cudaEventElapsedTime(&milliseconds, start2, stop2); std::cout<<"Summation Time= "<< milliseconds<<std::endl; cudaDeviceSynchronize(); cudaStreamDestroy(stream1); // Multiplication with K and alpha //scaling_result<<<>>>(); //cudaFree(d_convolution_buffer.arr); cudaMemcpy(h_output_tensor.arr, d_output_tensor.arr, copy_size, cudaMemcpyDeviceToHost); //cudaFree(d_output_tensor.arr); cudaEventDestroy(start); cudaEventDestroy(stop); cudaEventDestroy(start1); cudaEventDestroy(stop1); cudaEventDestroy(start2); cudaEventDestroy(stop2); return; } int main() { int row = 512; int col = 512; int kernel_row = 3; int kernel_col = 3; int channel_in = 1; int channel_out = 1; matrix3d<float> input_tensor; matrix4d<float> weight_tensor; input_tensor.row = row; input_tensor.col = col; input_tensor.channel = channel_in; // Init Matrices input_tensor.arr = new float [input_tensor.channel * input_tensor.row * input_tensor.col]; weight_tensor.row = kernel_row; weight_tensor.col = kernel_col; weight_tensor.channel_in = channel_in; weight_tensor.channel_out = channel_out; weight_tensor.arr = new float [weight_tensor.channel_in * weight_tensor.channel_out * weight_tensor.row * weight_tensor.col]; bool padding = true; // Default Values for(int i=0; input_tensor.channel > i; ++i) { for (int j=0; input_tensor.col * input_tensor.row> j; ++j) { input_tensor.arr[i * input_tensor.col * input_tensor.row + j] = (rand() % 50) - 25; } } for(int i=0; weight_tensor.channel_in * weight_tensor.channel_out > i; ++i) { for (int j=0; weight_tensor.col * weight_tensor.row> j; ++j) { weight_tensor.arr[i * weight_tensor.col * weight_tensor.row + j] = (rand() % 50) -25; } } // Make Weights binary as preProcessing auto weight_size = BinaryMatMemoryAllocation(std::make_pair(weight_tensor.row, weight_tensor.col), std::make_pair(weight_tensor.col, weight_tensor.row)); matrix4d<unsigned int> binary_weight_tensor; binary_weight_tensor.col = weight_size.first; binary_weight_tensor.row = weight_size.second; binary_weight_tensor.channel_in = weight_tensor.channel_in; binary_weight_tensor.channel_out = weight_tensor.channel_out; binary_weight_tensor.arr = new unsigned int [binary_weight_tensor.channel_in * binary_weight_tensor.channel_out *binary_weight_tensor.row * binary_weight_tensor.col]; for (int i= 0; weight_tensor.channel_out > i; ++i) { for(int j=0; weight_tensor.channel_in > j; ++j) { intMat2BinaryMat(&weight_tensor.arr[(i * weight_tensor.channel_in + j) * weight_tensor.row * weight_tensor.col], &binary_weight_tensor.arr[i * weight_tensor.channel_in + j], std::make_pair(weight_tensor.col, weight_tensor.row), weight_tensor.row, weight_tensor.col, binary_weight_tensor.col, binary_weight_tensor.row); } } delete[] weight_tensor.arr; // A sample layer matrix3d<float> output_tensor; output_tensor.col = input_tensor.col; output_tensor.row = input_tensor.row; output_tensor.channel = input_tensor.channel; output_tensor.arr = new float [input_tensor.col* input_tensor.row * input_tensor.channel];
xnor_convolution(input_tensor, binary_weight_tensor, output_tensor, /* alpha: weight scaling factor; 1.0f is a neutral placeholder */ 1.0f, weight_tensor.row, weight_tensor.col, padding); delete[] input_tensor.arr; delete[] binary_weight_tensor.arr; delete[] output_tensor.arr; return 0; }
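The sign-packing convention used throughout the file above (a register_size of 8x4, one sign bit per pixel, bit index y * register_size.first + x, with 1 for non-negative values) is easiest to check on the host. The following is a minimal, self-contained sketch of that layout; pack_tile and the sample values are hypothetical and only illustrate the bit order assumed by to_binary_register, they are not part of the file above.

#include <cstdio>
#include <utility>

// Mirrors the convention above: bit (y * 8 + x) of the 32-bit word holds the sign
// of the pixel at (x, y) inside an 8x4 tile (1 = non-negative, 0 = negative).
constexpr std::pair<int, int> reg_size(8, 4);

unsigned int pack_tile(const float* tile, int tile_w, int tile_h) {
    unsigned int packed = 0;
    for (int y = 0; y < tile_h && y < reg_size.second; ++y) {
        for (int x = 0; x < tile_w && x < reg_size.first; ++x) {
            if (tile[y * tile_w + x] >= 0.0f)
                packed |= 1u << (y * reg_size.first + x);
        }
    }
    return packed;
}

int main() {
    // A 3-wide, 2-tall sample tile; expected bits: 0, 2 (row 0) and 9 (row 1) -> 0x205.
    const float sample[2 * 3] = { 1.f, -2.f, 3.f,
                                 -4.f,  5.f, -6.f };
    printf("packed = 0x%08x\n", pack_tile(sample, 3, 2));
    return 0;
}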
920b13cc0986434b162542580a520783916497d1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_reconstruct.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *Q = NULL; hipMalloc(&Q, XSIZE*YSIZE); double *D1 = NULL; hipMalloc(&D1, XSIZE*YSIZE); double *U_ = NULL; hipMalloc(&U_, XSIZE*YSIZE); double *X = NULL; hipMalloc(&X, XSIZE*YSIZE); double omega = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_reconstruct), dim3(gridBlock),dim3(threadBlock), 0, 0, Q,D1,U_,X,omega); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_reconstruct), dim3(gridBlock),dim3(threadBlock), 0, 0, Q,D1,U_,X,omega); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_reconstruct), dim3(gridBlock),dim3(threadBlock), 0, 0, Q,D1,U_,X,omega); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
920b13cc0986434b162542580a520783916497d1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_reconstruct.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *Q = NULL; cudaMalloc(&Q, XSIZE*YSIZE); double *D1 = NULL; cudaMalloc(&D1, XSIZE*YSIZE); double *U_ = NULL; cudaMalloc(&U_, XSIZE*YSIZE); double *X = NULL; cudaMalloc(&X, XSIZE*YSIZE); double omega = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_reconstruct<<<gridBlock,threadBlock>>>(Q,D1,U_,X,omega); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_reconstruct<<<gridBlock,threadBlock>>>(Q,D1,U_,X,omega); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_reconstruct<<<gridBlock,threadBlock>>>(Q,D1,U_,X,omega); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e9334cdd4d1ab537669b569371a8590db2732106.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define USE_MNIST_LOADER #define MNIST_DOUBLE // includes, system #include <string> #include <iostream> #include <fstream> #include <string> #include <vector> #include <math.h> #include <time.h> #include "layer.h" #include "layer.hip" struct mnist_data { double data[28][28]; int label; //0-9 }; // set Layer static Layer l_input = Layer(0, 0, 28*28); static Layer l_c1 = Layer(5*5, 6, 24*24*6); static Layer l_p = Layer(4*4, 1, 6*6*6); static Layer l_f = Layer(6*6*6, 10, 10); static mnist_data *train_set, *test_set; static unsigned int train_cnt, test_cnt; unsigned int dataToInt(char* c) { unsigned int d = 0; for (int i = 0; i < 4; i++) { d <<= 8; d |= (unsigned char)c[i]; } return d; } int mnist_load( const char *image_filename, const char *label_filename, mnist_data **data, unsigned int *count) { char tmp[4]; unsigned char read_data[28*28]; unsigned int im, l, i, j, k, ic1, ic2, image_cnt, label_cnt; FILE *ifp = fopen(image_filename, "rb"); FILE *lfp = fopen(label_filename, "rb"); if (!ifp || !lfp) { printf("file not open"); if (ifp) fclose(ifp); if (lfp) fclose(lfp); return -1; } fread(tmp, 1, 4, ifp); im = dataToInt(tmp); fread(tmp, 1, 4, lfp); l = dataToInt(tmp); fread(tmp, 1, 4, ifp); image_cnt = dataToInt(tmp); fread(tmp, 1, 4, lfp); label_cnt = dataToInt(tmp); fread(tmp, 1, 4, ifp); ic1 = dataToInt(tmp); fread(tmp, 1, 4, ifp); ic2 = dataToInt(tmp); // printf("im, l, image_cnt, label_cnt, ic1, ic2 \n"); // printf("%d, %d, %d, %d, %d, %d \n", im, l, image_cnt, label_cnt, ic1, ic2); if(im != 2051 || l != 2049 || image_cnt != label_cnt || ic1 != 28 || ic2 != 28){ printf("get wrong file"); fclose(ifp); fclose(lfp); return -2; } *count = image_cnt; *data = (mnist_data *)malloc(sizeof(mnist_data) * image_cnt); for (i = 0; i < image_cnt; i++) { mnist_data *d = &(*data)[i]; fread(read_data, 1, 28*28, ifp); for(j=0; j<28; j++){ for(k=0; k<28; k++) d->data[j][k] = read_data[j*28+k]/255.0; } fread(tmp, 1, 1, lfp); d->label = tmp[0]%10; } fclose(ifp); fclose(lfp); return 0; } static inline void loadData(){ mnist_load("MNIST_data/train-images.idx3-ubyte", "MNIST_data/train-labels.idx1-ubyte", &train_set, &train_cnt); mnist_load("MNIST_data/t10k-images.idx3-ubyte", "MNIST_data/t10k-labels.idx1-ubyte", &test_set, &test_cnt); } static float forward(const double data[28][28]){ // printf("run forward\n"); float input[28][28]; for (int i = 0; i<28; i++){ for (int j = 0; j<28; j++){ input[i][j] = data[i][j]; // printf("%.2f ", data[i][j]); } // printf("\n"); } l_input.clear(); l_c1.clear(); l_p.clear(); l_f.clear(); // printf("**************************************\n"); //example for convLayer 1: l_input.setInput((float *)input); // hipMemcpyToSymbol(conv_input, input, sizeof(float) * 28 * 28); //printf("input image: %f\n", &l_input.output[0][0]); //timer hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); int bz; bz = ceil((float)24/TILE_WIDTH)*ceil((float)24/TILE_WIDTH); dim3 gridDim(1, 6, bz); dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1); //constant memory test // ConvLayerForward_Kernel<<<gridDim,blockDim>>>((float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight, l_c1.bias, 1, 28, 28, 24, 5, 6); hipLaunchKernelGGL(( ConvLayerForward_Kernel_1), dim3(gridDim),dim3(blockDim), 0, 0, (float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight, l_c1.bias, 1, 28, 28, 24, 5, 6); 
hipLaunchKernelGGL(( apply_sigmoid) , dim3(64),dim3(64), 0, 0, l_c1.preact, l_c1.output, l_c1.bytes); // for pooling layer example: dim3 gridDimPool(1, 1, 1); dim3 blockDimPool(6, 6, 6); hipLaunchKernelGGL(( PoolLayerForward_Kernel), dim3(gridDimPool),dim3(blockDimPool), 0, 0, (float (*)[24][24])l_c1.output, (float (*)[6][6])l_p.preact, (float (*)[4][4])l_p.weight, l_p.bias ,24, 24, 6, 4); hipLaunchKernelGGL(( apply_sigmoid) , dim3(64),dim3(64), 0, 0, l_p.preact, l_p.output, l_p.bytes); // for fully connected layer dim3 gridDimfc(1, 10, 1); dim3 blockDimfc(6, 6, 6); hipLaunchKernelGGL(( FullyConLayerForward_kernel), dim3(gridDimfc),dim3(blockDimfc), 0, 0, (float (*)[6][6])l_p.output, (float (*)[6][6][6])l_f.weight, l_f.preact, l_f.bias, 1, 6, 10, 1, 10); hipLaunchKernelGGL(( apply_sigmoid), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.output, l_f.bytes); //end timer: hipEventRecord(stop, 0); hipEventSynchronize(stop); // after hipEventRecord hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); return time; } static float backward(){ //timer hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); dim3 gridDimfc(1, 10, 1); dim3 blockDimfc(6, 6, 6); hipLaunchKernelGGL(( FullyConLayerBackward_kernel), dim3(gridDimfc), dim3(blockDimfc), 0, 0, l_f.b_preact, l_f.bias, (float (*)[6][6][6]) l_f.weight, (float (*)[6][6])l_p.output, (float (*)[6][6])l_p.b_output); dim3 gridDims(1, 1, 1); dim3 blockDims(6, 6, 6); hipLaunchKernelGGL(( PoolLayerBackward_Kernel), dim3(gridDims), dim3(blockDims), 0, 0, (float (*)[6][6])l_p.preact, (float (*)[6][6])l_p.b_output, (float (*)[4][4])l_p.b_weight, (float (*)[4][4])l_p.weight, (float (*)[24][24])l_c1.output, (float (*)[24][24])l_c1.b_output, l_p.bias); dim3 gridDimc(1, 6, 1); dim3 blockDimc(24, 24, 1); hipLaunchKernelGGL(( ConvLayerBackward_Kernel), dim3(gridDimc), dim3(blockDimc), 0, 0, (float (*)[24][24])l_c1.preact, (float (*)[24][24])l_c1.b_output, (float (*)[5][5])l_c1.weight, (float (*)[28])l_input.output, l_c1.bias); hipEventRecord(stop, 0); hipEventSynchronize(stop); // after hipEventRecord hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); return time; } static void learn(){ float time_taken = 0.0; clock_t t; t = clock(); for(int i=0; i< train_cnt; i++){ //for(int i=0; i<10; i++){ // printf("label: %d \n", train_set[i].label); l_f.bp_clear(); l_p.bp_clear(); l_c1.bp_clear(); time_taken += forward(train_set[i].data); hipLaunchKernelGGL(( loss_func), dim3(1), dim3(10), 0, 0, l_f.b_preact, l_f.output, train_set[i].label, 10); time_taken += backward(); } printf("time on GPU: %.5f seconds\n", time_taken / 1000); t = clock() - t; float cpu_time = (float)t/CLOCKS_PER_SEC; printf("Total spend %.2f seconds \n", cpu_time); } static unsigned int classify(double data[28][28]) { float res[10]; forward(data); unsigned int max = 0; hipMemcpy(res, l_f.output, sizeof(float) * 10, hipMemcpyDeviceToHost); // hipMemcpy(res, l_f.b_preact, sizeof(float) * 10, hipMemcpyDeviceToHost); for (int i = 1; i < 10; ++i) { if (res[max] < res[i]) { max = i; } } return max; } // Perform forward propagation of test data static void test() { int error = 0; for (int i = 0; i < test_cnt; ++i) { if (classify(test_set[i].data) != test_set[i].label) { ++error; } } printf("Test Accuracy:: %.2f%%\n", 100 - ( double(error) / double(test_cnt) * 100.0)); } int main(){ int epoch = 5; printf("CNN CUDA version result: \n"); printf("Number of epoch: %d \n\n", epoch); loadData(); for 
(int i = 0; i < epoch; i++){ printf("epoch: %d \n", i + 1); learn(); test(); } printf("finish\n"); return 0; }
e9334cdd4d1ab537669b569371a8590db2732106.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #define USE_MNIST_LOADER #define MNIST_DOUBLE // includes, system #include <string> #include <iostream> #include <fstream> #include <string> #include <vector> #include <math.h> #include <time.h> #include "layer.h" #include "layer.cu" struct mnist_data { double data[28][28]; int label; //0-9 }; // set Layer static Layer l_input = Layer(0, 0, 28*28); static Layer l_c1 = Layer(5*5, 6, 24*24*6); static Layer l_p = Layer(4*4, 1, 6*6*6); static Layer l_f = Layer(6*6*6, 10, 10); static mnist_data *train_set, *test_set; static unsigned int train_cnt, test_cnt; unsigned int dataToInt(char* c) { unsigned int d = 0; for (int i = 0; i < 4; i++) { d <<= 8; d |= (unsigned char)c[i]; } return d; } int mnist_load( const char *image_filename, const char *label_filename, mnist_data **data, unsigned int *count) { char tmp[4]; unsigned char read_data[28*28]; unsigned int im, l, i, j, k, ic1, ic2, image_cnt, label_cnt; FILE *ifp = fopen(image_filename, "rb"); FILE *lfp = fopen(label_filename, "rb"); if (!ifp || !lfp) { printf("file not open"); if (ifp) fclose(ifp); if (lfp) fclose(lfp); return -1; } fread(tmp, 1, 4, ifp); im = dataToInt(tmp); fread(tmp, 1, 4, lfp); l = dataToInt(tmp); fread(tmp, 1, 4, ifp); image_cnt = dataToInt(tmp); fread(tmp, 1, 4, lfp); label_cnt = dataToInt(tmp); fread(tmp, 1, 4, ifp); ic1 = dataToInt(tmp); fread(tmp, 1, 4, ifp); ic2 = dataToInt(tmp); // printf("im, l, image_cnt, label_cnt, ic1, ic2 \n"); // printf("%d, %d, %d, %d, %d, %d \n", im, l, image_cnt, label_cnt, ic1, ic2); if(im != 2051 || l != 2049 || image_cnt != label_cnt || ic1 != 28 || ic2 != 28){ printf("get wrong file"); fclose(ifp); fclose(lfp); return -2; } *count = image_cnt; *data = (mnist_data *)malloc(sizeof(mnist_data) * image_cnt); for (i = 0; i < image_cnt; i++) { mnist_data *d = &(*data)[i]; fread(read_data, 1, 28*28, ifp); for(j=0; j<28; j++){ for(k=0; k<28; k++) d->data[j][k] = read_data[j*28+k]/255.0; } fread(tmp, 1, 1, lfp); d->label = tmp[0]%10; } fclose(ifp); fclose(lfp); return 0; } static inline void loadData(){ mnist_load("MNIST_data/train-images.idx3-ubyte", "MNIST_data/train-labels.idx1-ubyte", &train_set, &train_cnt); mnist_load("MNIST_data/t10k-images.idx3-ubyte", "MNIST_data/t10k-labels.idx1-ubyte", &test_set, &test_cnt); } static float forward(const double data[28][28]){ // printf("run forward\n"); float input[28][28]; for (int i = 0; i<28; i++){ for (int j = 0; j<28; j++){ input[i][j] = data[i][j]; // printf("%.2f ", data[i][j]); } // printf("\n"); } l_input.clear(); l_c1.clear(); l_p.clear(); l_f.clear(); // printf("**************************************\n"); //example for convLayer 1: l_input.setInput((float *)input); // cudaMemcpyToSymbol(conv_input, input, sizeof(float) * 28 * 28); //printf("input image: %f\n", &l_input.output[0][0]); //timer cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); int bz; bz = ceil((float)24/TILE_WIDTH)*ceil((float)24/TILE_WIDTH); dim3 gridDim(1, 6, bz); dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1); //constant memory test // ConvLayerForward_Kernel<<<gridDim,blockDim>>>((float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight, l_c1.bias, 1, 28, 28, 24, 5, 6); ConvLayerForward_Kernel_1<<<gridDim,blockDim>>>((float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight, l_c1.bias, 1, 28, 28, 24, 5, 6); apply_sigmoid <<<64,64>>>(l_c1.preact, l_c1.output, l_c1.bytes); // for pooling layer example: dim3 
gridDimPool(1, 1, 1); dim3 blockDimPool(6, 6, 6); PoolLayerForward_Kernel<<<gridDimPool,blockDimPool>>>((float (*)[24][24])l_c1.output, (float (*)[6][6])l_p.preact, (float (*)[4][4])l_p.weight, l_p.bias ,24, 24, 6, 4); apply_sigmoid <<<64,64>>>(l_p.preact, l_p.output, l_p.bytes); // for fully connected layer dim3 gridDimfc(1, 10, 1); dim3 blockDimfc(6, 6, 6); FullyConLayerForward_kernel<<<gridDimfc,blockDimfc>>>((float (*)[6][6])l_p.output, (float (*)[6][6][6])l_f.weight, l_f.preact, l_f.bias, 1, 6, 10, 1, 10); apply_sigmoid<<<64, 64>>>(l_f.preact, l_f.output, l_f.bytes); //end timer: cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // after cudaEventRecord cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); return time; } static float backward(){ //timer cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); dim3 gridDimfc(1, 10, 1); dim3 blockDimfc(6, 6, 6); FullyConLayerBackward_kernel<<<gridDimfc, blockDimfc>>>( l_f.b_preact, l_f.bias, (float (*)[6][6][6]) l_f.weight, (float (*)[6][6])l_p.output, (float (*)[6][6])l_p.b_output); dim3 gridDims(1, 1, 1); dim3 blockDims(6, 6, 6); PoolLayerBackward_Kernel<<<gridDims, blockDims>>>( (float (*)[6][6])l_p.preact, (float (*)[6][6])l_p.b_output, (float (*)[4][4])l_p.b_weight, (float (*)[4][4])l_p.weight, (float (*)[24][24])l_c1.output, (float (*)[24][24])l_c1.b_output, l_p.bias); dim3 gridDimc(1, 6, 1); dim3 blockDimc(24, 24, 1); ConvLayerBackward_Kernel<<<gridDimc, blockDimc>>>( (float (*)[24][24])l_c1.preact, (float (*)[24][24])l_c1.b_output, (float (*)[5][5])l_c1.weight, (float (*)[28])l_input.output, l_c1.bias); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // after cudaEventRecord cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); return time; } static void learn(){ float time_taken = 0.0; clock_t t; t = clock(); for(int i=0; i< train_cnt; i++){ //for(int i=0; i<10; i++){ // printf("label: %d \n", train_set[i].label); l_f.bp_clear(); l_p.bp_clear(); l_c1.bp_clear(); time_taken += forward(train_set[i].data); loss_func<<<1, 10>>>(l_f.b_preact, l_f.output, train_set[i].label, 10); time_taken += backward(); } printf("time on GPU: %.5f seconds\n", time_taken / 1000); t = clock() - t; float cpu_time = (float)t/CLOCKS_PER_SEC; printf("Total spend %.2f seconds \n", cpu_time); } static unsigned int classify(double data[28][28]) { float res[10]; forward(data); unsigned int max = 0; cudaMemcpy(res, l_f.output, sizeof(float) * 10, cudaMemcpyDeviceToHost); // cudaMemcpy(res, l_f.b_preact, sizeof(float) * 10, cudaMemcpyDeviceToHost); for (int i = 1; i < 10; ++i) { if (res[max] < res[i]) { max = i; } } return max; } // Perform forward propagation of test data static void test() { int error = 0; for (int i = 0; i < test_cnt; ++i) { if (classify(test_set[i].data) != test_set[i].label) { ++error; } } printf("Test Accuracy:: %.2f%%\n", 100 - ( double(error) / double(test_cnt) * 100.0)); } int main(){ int epoch = 5; printf("CNN CUDA version result: \n"); printf("Number of epoch: %d \n\n", epoch); loadData(); for (int i = 0; i < epoch; i++){ printf("epoch: %d \n", i + 1); learn(); test(); } printf("finish\n"); return 0; }
ea309040b255eee2c9e78947b80ce07424f4da9a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "../includes/fractol.h" #include "../libft/libft.h" #include <stdio.h> # define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } __global__ void md_2(unsigned int *a, unsigned int constw, unsigned int consth, float middlex, float middley, float scale, unsigned int max) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int index = row * constw + col; if(col >= constw || row >= consth) return; float cr = ((col - ((float)(DEF_WIN_WIDTH - 2) / 2)) / scale) - middlex; float ci = ((row - ((float)(DEF_WIN_HEIGHT - 2) / 2)) / scale) + middley; float zn_1r = 0; float zn_1i = 0; float zn_r = 0; float zn_i = 0; unsigned int iteration = 0; while (iteration < max) { zn_r = (zn_1r * zn_1r) - (zn_1i * zn_1i) + cr; zn_i = 2 * (zn_1r * zn_1i) + ci; zn_1r = zn_r; zn_1i = zn_i; if ((zn_r * zn_r + zn_i * zn_i) > 4) break; iteration++; } a[index] = iteration; } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { (void)file; (void)line; if (abort) exit(code); } } extern "C" void cuda_call_md(unsigned int *a_h, unsigned int constw, unsigned int consth, float middlex, float middley, float scale, unsigned int max, unsigned int reset) { static unsigned int *a_d = NULL; static size_t size = 0; static dim3 block_size(16, 16); static dim3 grid_size(DEF_WIN_WIDTH / block_size.x + (DEF_WIN_WIDTH - DEF_WIN_WIDTH / block_size.x), DEF_WIN_HEIGHT / block_size.y + (DEF_WIN_HEIGHT - DEF_WIN_HEIGHT / block_size.y)); if (!reset) { if (size == 0) { size = constw * consth * sizeof(unsigned int); gpuErrchk(hipMalloc((void **) &a_d, size)); } hipLaunchKernelGGL(( md_2) , dim3(grid_size), dim3(block_size), 0 , 0, a_d, constw, consth, middlex, middley, scale, max); gpuErrchk(hipMemcpy(a_h, a_d, size, hipMemcpyDeviceToHost)); } else { hipFree(a_d); } }
ea309040b255eee2c9e78947b80ce07424f4da9a.cu
#include <cuda.h> #include "../includes/fractol.h" #include "../libft/libft.h" #include <stdio.h> # define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } __global__ void md_2(unsigned int *a, unsigned int constw, unsigned int consth, float middlex, float middley, float scale, unsigned int max) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int index = row * constw + col; if(col >= constw || row >= consth) return; float cr = ((col - ((float)(DEF_WIN_WIDTH - 2) / 2)) / scale) - middlex; float ci = ((row - ((float)(DEF_WIN_HEIGHT - 2) / 2)) / scale) + middley; float zn_1r = 0; float zn_1i = 0; float zn_r = 0; float zn_i = 0; unsigned int iteration = 0; while (iteration < max) { zn_r = (zn_1r * zn_1r) - (zn_1i * zn_1i) + cr; zn_i = 2 * (zn_1r * zn_1i) + ci; zn_1r = zn_r; zn_1i = zn_i; if ((zn_r * zn_r + zn_i * zn_i) > 4) break; iteration++; } a[index] = iteration; } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { (void)file; (void)line; if (abort) exit(code); } } extern "C" void cuda_call_md(unsigned int *a_h, unsigned int constw, unsigned int consth, float middlex, float middley, float scale, unsigned int max, unsigned int reset) { static unsigned int *a_d = NULL; static size_t size = 0; static dim3 block_size(16, 16); static dim3 grid_size(DEF_WIN_WIDTH / block_size.x + (DEF_WIN_WIDTH - DEF_WIN_WIDTH / block_size.x), DEF_WIN_HEIGHT / block_size.y + (DEF_WIN_HEIGHT - DEF_WIN_HEIGHT / block_size.y)); if (!reset) { if (size == 0) { size = constw * consth * sizeof(unsigned int); gpuErrchk(cudaMalloc((void **) &a_d, size)); } md_2 <<< grid_size, block_size, 0 >>> (a_d, constw, consth, middlex, middley, scale, max); gpuErrchk(cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost)); } else { cudaFree(a_d); } }
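cuda_call_md above keeps its device buffer in a function-local static and frees it only when called with a non-zero reset flag. The sketch below illustrates that calling contract from the host; render_frames and the zoom/iteration values are hypothetical, only the cuda_call_md signature and the DEF_WIN_WIDTH/DEF_WIN_HEIGHT macros come from the file above.

#include <stdlib.h>
#include "../includes/fractol.h"   // provides DEF_WIN_WIDTH / DEF_WIN_HEIGHT

// Declared in the file above; rendered iteration counts are written into a_h.
extern "C" void cuda_call_md(unsigned int *a_h, unsigned int constw, unsigned int consth,
                             float middlex, float middley, float scale,
                             unsigned int max, unsigned int reset);

static void render_frames(void) {
    unsigned int w = DEF_WIN_WIDTH, h = DEF_WIN_HEIGHT;
    unsigned int *pixels = (unsigned int *)malloc(w * h * sizeof(unsigned int));
    float zoom = 300.0f;
    for (int frame = 0; frame < 10; ++frame) {
        // reset == 0: the cached device buffer is allocated lazily and then reused
        cuda_call_md(pixels, w, h, 0.5f, 0.0f, zoom, 256, 0);
        zoom *= 1.1f;
    }
    // reset != 0: free the cached device buffer once rendering is done
    cuda_call_md(pixels, w, h, 0.0f, 0.0f, 0.0f, 0, 1);
    free(pixels);
}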
9099c188c9f3c0629a80d52c2ef87cec5d3e52b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Getter of the D set // The D set is defined as the nodes in CuB that have more than 3/5*|C| neighbors in C __global__ void stratify_lowdegree_getD(float *CuB, float *C, int *indptr, int *indices, int n, float c, float *D) { const int i = threadIdx.x; D[i] = 0; if(CuB[i] == 0) return; int d = 0; for(int j = indptr[i]; j < indptr[i+1]; j++){ if(C[indices[j]]){ d += 1; } } if(d >= 3.0f / 5.0f * c){ /* floating-point 3/5, i.e. 0.6*|C|; integer 3/5 would truncate to 0 */ D[i] = 1; } }
9099c188c9f3c0629a80d52c2ef87cec5d3e52b7.cu
// Getter of the D set // The D set is defined as the nodes in CuB that have more than 3/5*|C| neighbors in C __global__ void stratify_lowdegree_getD(float *CuB, float *C, int *indptr, int *indices, int n, float c, float *D) { const int i = threadIdx.x; D[i] = 0; if(CuB[i] == 0) return; int d = 0; for(int j = indptr[i]; j < indptr[i+1]; j++){ if(C[indices[j]]){ d += 1; } } if(d >= 3.0f / 5.0f * c){ /* floating-point 3/5, i.e. 0.6*|C|; integer 3/5 would truncate to 0 */ D[i] = 1; } }
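stratify_lowdegree_getD above indexes nodes with threadIdx.x only, so it is presumably launched as a single block with one thread per node (which caps n at the 1024-thread block limit). A minimal host-side launch sketch under that assumption follows; launch_getD and the buffer names are illustrative and assume the kernel is visible in the same translation unit, they are not taken from the surrounding project.

// Hypothetical host-side launch for stratify_lowdegree_getD.
// d_CuB, d_C, d_D are length-n float masks on the device; d_indptr/d_indices hold the CSR graph.
void launch_getD(float *d_CuB, float *d_C, int *d_indptr, int *d_indices,
                 int n, float c, float *d_D) {
    // One thread per node; the kernel only uses threadIdx.x, so n must fit in a single block.
    stratify_lowdegree_getD<<<1, n>>>(d_CuB, d_C, d_indptr, d_indices, n, c, d_D);
    cudaDeviceSynchronize();
}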
d5ffb36bce11e976fcb2eff9e83e0a7949620d8a.hip
// !!! This is a file automatically generated by hipify!!! #define DIM 4096 #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <math.h> #include <stdio.h> #include <hip/hip_runtime.h> //#include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <assert.h> #include <chrono> #include "jetson_tx2_power.h" #define TILE_WIDTH 32 __global__ void MatrixMulKernelTiled(double *M, double *N, double *P, int size) { __shared__ double Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ double Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; int Pvalue = 0; for (int ph = 0; ph < (int)ceil(size / (double)TILE_WIDTH); ++ph) { if ((Row < size) && ((ph*TILE_WIDTH + tx) < size)) { Mds[ty][tx] = M[Row * size + ph * TILE_WIDTH + tx]; } if (((ph * TILE_WIDTH + ty) < size) && (Col < size)) { Nds[ty][tx] = N[(ph*TILE_WIDTH + ty) * size + Col]; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { Pvalue += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); } if ((Row < size) && (Col < size)) { P[Row * size + Col] = Pvalue; } } void LaunchKernel(double *M, double *N, double *P, int size) { double *d_A, *d_B, *d_C; int spazio_tot = (size * size) * sizeof(double); hipMalloc((void **)&d_A, spazio_tot); hipMalloc((void **)&d_B, spazio_tot); hipMalloc((void **)&d_C, spazio_tot); hipMemcpy(d_A, M, spazio_tot, hipMemcpyHostToDevice); hipMemcpy(d_B, N, spazio_tot, hipMemcpyHostToDevice); dim3 block(TILE_WIDTH, TILE_WIDTH, 1); dim3 grid(ceil((double)DIM / TILE_WIDTH), ceil((double)DIM / TILE_WIDTH), 1); start_thread(); MatrixMulKernelTiled << <grid, block >> > (d_A, d_B, d_C, size); hipDeviceSynchronize(); stop_thread(); hipMemcpy(P, d_C, spazio_tot, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); } void MatrixMulHost(double(*A)[DIM], double(*B)[DIM], double(*C)[DIM]) { for (int c = 0; c < DIM; c++) { for (int d = 0; d < DIM; d++) { int Pvalue = 0; for (int k = 0; k < DIM; k++) { Pvalue += A[c][k] * B[k][d]; } C[c][d] = Pvalue; } } } int main() { double *A =(double *)malloc(DIM*DIM*sizeof(double)); double *B=(double *)malloc(DIM*DIM*sizeof(double)); double *C=(double *)malloc(DIM*DIM*sizeof(double)); //riempio le matrici con dei valori arbitrari for (int i = 0; i < DIM; i++) { for (int j = 0; j < DIM; j++) { A[i*DIM+j] = 1.0; B[i*DIM+j] = 1.0; } } std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now(); LaunchKernel(&A[0], &B[0], &C[0], DIM); std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count(); printf("%f\n",tempo); printf("%lf\n", C[DIM*DIM -1]); free(A); free(B); free(C); }
d5ffb36bce11e976fcb2eff9e83e0a7949620d8a.cu
#define DIM 4096 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <math.h> #include <stdio.h> #include <cuda.h> //#include <device_functions.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <assert.h> #include <chrono> #include "jetson_tx2_power.h" #define TILE_WIDTH 32 __global__ void MatrixMulKernelTiled(double *M, double *N, double *P, int size) { __shared__ double Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ double Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; int Pvalue = 0; for (int ph = 0; ph < (int)ceil(size / (double)TILE_WIDTH); ++ph) { if ((Row < size) && ((ph*TILE_WIDTH + tx) < size)) { Mds[ty][tx] = M[Row * size + ph * TILE_WIDTH + tx]; } if (((ph * TILE_WIDTH + ty) < size) && (Col < size)) { Nds[ty][tx] = N[(ph*TILE_WIDTH + ty) * size + Col]; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { Pvalue += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); } if ((Row < size) && (Col < size)) { P[Row * size + Col] = Pvalue; } } void LaunchKernel(double *M, double *N, double *P, int size) { double *d_A, *d_B, *d_C; int spazio_tot = (size * size) * sizeof(double); cudaMalloc((void **)&d_A, spazio_tot); cudaMalloc((void **)&d_B, spazio_tot); cudaMalloc((void **)&d_C, spazio_tot); cudaMemcpy(d_A, M, spazio_tot, cudaMemcpyHostToDevice); cudaMemcpy(d_B, N, spazio_tot, cudaMemcpyHostToDevice); dim3 block(TILE_WIDTH, TILE_WIDTH, 1); dim3 grid(ceil((double)DIM / TILE_WIDTH), ceil((double)DIM / TILE_WIDTH), 1); start_thread(); MatrixMulKernelTiled << <grid, block >> > (d_A, d_B, d_C, size); cudaDeviceSynchronize(); stop_thread(); cudaMemcpy(P, d_C, spazio_tot, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } void MatrixMulHost(double(*A)[DIM], double(*B)[DIM], double(*C)[DIM]) { for (int c = 0; c < DIM; c++) { for (int d = 0; d < DIM; d++) { int Pvalue = 0; for (int k = 0; k < DIM; k++) { Pvalue += A[c][k] * B[k][d]; } C[c][d] = Pvalue; } } } int main() { double *A =(double *)malloc(DIM*DIM*sizeof(double)); double *B=(double *)malloc(DIM*DIM*sizeof(double)); double *C=(double *)malloc(DIM*DIM*sizeof(double)); //riempio le matrici con dei valori arbitrari for (int i = 0; i < DIM; i++) { for (int j = 0; j < DIM; j++) { A[i*DIM+j] = 1.0; B[i*DIM+j] = 1.0; } } std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now(); LaunchKernel(&A[0], &B[0], &C[0], DIM); std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count(); printf("%f\n",tempo); printf("%lf\n", C[DIM*DIM -1]); free(A); free(B); free(C); }
98711e5f7c449786c87cfe155ed55c534ad88f44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cuda_mat_transpose(const double* src, double* dst, int colssrc, int colsdst, int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ int cdst = tid % colsdst; int rdst = tid / colsdst; int rsrc = cdst; int csrc = rdst; dst[tid] = src[rsrc * colssrc + csrc]; tid += stride; } }
98711e5f7c449786c87cfe155ed55c534ad88f44.cu
#include "includes.h" __global__ void cuda_mat_transpose(const double* src, double* dst, int colssrc, int colsdst, int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ int cdst = tid % colsdst; int rdst = tid / colsdst; int rsrc = cdst; int csrc = rdst; dst[tid] = src[rsrc * colssrc + csrc]; tid += stride; } }
c660f4df8cc41730aad1a21b5e01ac6b57c9d047.hip
// !!! This is a file automatically generated by hipify!!! /* We use a term *tile* to identify the rectangular submatrices of the image. Not to be confused with the blocks of threads. */ #include <hip/hip_runtime.h> #include <stdio.h> #define DSM_MAX_TILES_PER_BLOCK 500 #define DSM_MAX_TILES_PER_THREAD 500 // threads per block #define TPB_1D 16 #define TPB (TPB_1D * TPB_1D) // satellite pixels per thread #define SAT_PPT_1D 2 #define SAT_PPT (SAT_PPT_1D * SAT_PPT_1D) // satellite pixels per block #define SAT_PPB_1D (SAT_PPT_1D * TPB_1D) #define SAT_PPB (SAT_PPB_1D * SAT_PPB_1D) // DSM pixels per thread #define DSM_PPT_1D 1 #define DSM_PPT (DSM_PPT_1D * DSM_PPT_1D) // DSM pixels per block #define DSM_PPB_1D (DSM_PPT_1D * TPB_1D) // #define DSM_PPB (DSM_PPB_1D * DSM_PPB_1D) // this needs to be large negative number #define DSM_IGNORE_VALUE -1E5 // extern const float DSM_IGNORE_VALUE; #define EPS 1E-3 #define SCAN_BLOCK_DIM TPB #include "exclusiveScan.cu_inl" #define DTYPE float __device__ bool d_rectanglesIntersect(DTYPE* bbox1, DTYPE* bbox2) { if (bbox2[0] > bbox1[2] || bbox2[1] > bbox1[3] || bbox1[0] > bbox2[2] || bbox1[1] > bbox2[3]) { return false; } else { return true; } } __device__ DTYPE d_area(DTYPE x1, DTYPE y1, DTYPE x2, DTYPE y2, DTYPE x3, DTYPE y3) { return abs(x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2; } __device__ DTYPE d_interpolate_three(DTYPE x, DTYPE y, DTYPE x1, DTYPE y1, DTYPE v1, DTYPE x2, DTYPE y2, DTYPE v2, DTYPE x3, DTYPE y3, DTYPE v3) { DTYPE denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3); DTYPE w1 = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom; DTYPE w2 = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom; DTYPE w3 = 1. - w1 - w2; return (w1 * v1 + w2 * v2 + w3 * v3); } __device__ bool d_inside_triangle(DTYPE x, DTYPE y, DTYPE x1, DTYPE y1, DTYPE x2, DTYPE y2, DTYPE x3, DTYPE y3) { DTYPE A = d_area(x1, y1, x2, y2, x3, y3); DTYPE A1 = d_area(x, y, x1, y1, x2, y2); DTYPE A2 = d_area(x, y, x3, y3, x1, y1); DTYPE A3 = d_area(x, y, x2, y2, x3, y3); return (abs(A1 + A2 + A3 - A) < EPS); } __global__ void kernelRenderSatElevation( DTYPE* pX, DTYPE* pY, DTYPE* pZ, DTYPE* pOut, DTYPE* pTilesBboxes, int numDSMTiles, int numDSMTiles_X, int dsm_width, int dsm_height, int sat_width, int sat_height) { // One thread block processes one sattelite tile // linear thread index that is sorted into thread warps int linearThreadIdx = threadIdx.y * blockDim.x + threadIdx.x; // pixels being processed int satTileX0 = blockIdx.x * SAT_PPB_1D; int satTileY0 = blockIdx.y * SAT_PPB_1D; int satTileX1 = satTileX0 + SAT_PPB_1D - 1; int satTileY1 = satTileY0 + SAT_PPB_1D - 1; if (blockIdx.x == gridDim.x - 1) { satTileX1 = sat_width - 1; } if (blockIdx.y == gridDim.y - 1) { satTileY1 = sat_height - 1; } DTYPE satTileBbox[] = { static_cast<DTYPE>(satTileX0), static_cast<DTYPE>(satTileY0), static_cast<DTYPE>(satTileX1), static_cast<DTYPE>(satTileY1), }; __shared__ uint privateTileCount[TPB]; __shared__ uint accumPrivateTileCount[TPB]; // TODO: use tileIndex as scratch to save some memory __shared__ uint privateTileCountScratch[2 * TPB]; __shared__ uint tileIndex [DSM_MAX_TILES_PER_BLOCK]; int dsmTilesPerThread = (numDSMTiles + TPB - 1) / TPB; int dsmTilesStart = dsmTilesPerThread * linearThreadIdx; int dsmTilesEnd = dsmTilesStart + dsmTilesPerThread; // (linearThreadIdx == TPB - 1) condition is wrong, because here // we divide the array into TPB parts, as opposed to dividing // into parts of fixed size dsmTilesEnd = fminf(dsmTilesEnd, numDSMTiles); int numPrivateTiles = 
0; uint privateTileList[DSM_MAX_TILES_PER_THREAD]; for (int i = dsmTilesStart; i < dsmTilesEnd; ++i) { if (d_rectanglesIntersect(pTilesBboxes + i * 4, satTileBbox)) privateTileList[numPrivateTiles++] = i; } privateTileCount[linearThreadIdx] = numPrivateTiles; __syncthreads(); sharedMemExclusiveScan(linearThreadIdx, privateTileCount, accumPrivateTileCount, privateTileCountScratch, TPB); __syncthreads(); // total number of DSM tiles that intersect with the current sat tile int numTiles = privateTileCount[TPB - 1] + accumPrivateTileCount[TPB - 1]; /* // TODO: debug for (int dx = 0; dx < SAT_PPT_1D; ++dx) { for (int dy = 0; dy < SAT_PPT_1D; ++dy) { int x = satTileX0 + SAT_PPT_1D * threadIdx.x + dx; int y = satTileY0 + SAT_PPT_1D * threadIdx.y + dy; if (x > sat_width - 1 || y > sat_height - 1) { continue; } int pixelIndex = y * sat_width + x; pOut[pixelIndex] = static_cast<DTYPE>(numTiles); } } int tmpIdx = (blockIdx.y * TPB_1D + threadIdx.y) * sat_width + blockIdx.x * TPB_1D + threadIdx.x; if (tmpIdx < sat_height * sat_width) { // pOut[tmpIdx] = static_cast<int>(accumPrivateTileCount[linearThreadIdx]); pOut[tmpIdx] = static_cast<DTYPE>(numTiles); // pOut[tmpIdx] = static_cast<DTYPE>(numPrivateTiles); } // end */ int curIndex = accumPrivateTileCount[linearThreadIdx]; for (int i = 0; i < numPrivateTiles; ++i) { tileIndex[curIndex++] = privateTileList[i]; } __syncthreads(); for (int iTile = 0; iTile < numTiles; ++iTile) { int dsmTileIndex = tileIndex[iTile]; int dsmTileX0 = (dsmTileIndex % numDSMTiles_X) * (DSM_PPB_1D - 1); int dsmTileY0 = (dsmTileIndex / numDSMTiles_X) * (DSM_PPB_1D - 1); int dsmTileX1 = dsmTileX0 + DSM_PPB_1D - 1; int dsmTileY1 = dsmTileY0 + DSM_PPB_1D - 1; if (dsmTileX1 > dsm_width - 2) { dsmTileX1 = dsm_width - 2; } if (dsmTileY1 > dsm_height - 2) { dsmTileY1 = dsm_height - 2; } for (int row_d = dsmTileY0; row_d <= dsmTileY1; ++row_d) { for (int col_d = dsmTileX0; col_d <= dsmTileX1; ++col_d) { int idx = row_d * dsm_width + col_d; for (int j = 0; j < 2; ++j) { DTYPE x1, y1, elev1, x2, y2, elev2, x3, y3, elev3; if (j == 0) { x1 = pX[idx] - satTileX0; y1 = pY[idx] - satTileY0; elev1 = pZ[idx]; x2 = pX[idx + 1] - satTileX0; y2 = pY[idx + 1] - satTileY0; elev2 = pZ[idx + 1]; x3 = pX[idx + dsm_width] - satTileX0; y3 = pY[idx + dsm_width] - satTileY0; elev3 = pZ[idx + dsm_width]; } else { // j == 1 x1 = pX[idx + 1] - satTileX0; y1 = pY[idx + 1] - satTileY0; elev1 = pZ[idx + 1]; x2 = pX[idx + dsm_width] - satTileX0; y2 = pY[idx + dsm_width] - satTileY0; elev2 = pZ[idx + dsm_width]; x3 = pX[idx + dsm_width + 1] - satTileX0; y3 = pY[idx + dsm_width + 1] - satTileY0; elev3 = pZ[idx + dsm_width + 1]; } // skip invalid faces if ((elev1 < DSM_IGNORE_VALUE + 1) || (elev2 < DSM_IGNORE_VALUE + 1) || (elev3 < DSM_IGNORE_VALUE + 1)) { continue; } for (int dx = 0; dx < SAT_PPT_1D; ++dx) { for (int dy = 0; dy < SAT_PPT_1D; ++dy) { int x = satTileX0 + SAT_PPT_1D * threadIdx.x + dx; int y = satTileY0 + SAT_PPT_1D * threadIdx.y + dy; if (x > sat_width - 1 || y > sat_height - 1) { continue; } int pixelIndex = y * sat_width + x; DTYPE fx = static_cast<DTYPE>(x) - satTileX0; DTYPE fy = static_cast<DTYPE>(y) - satTileY0; // if (d_inside_barycentric( if (d_inside_triangle( fx, fy, x1, y1, x2, y2, x3, y3)) { DTYPE elev = d_interpolate_three( fx, fy, x1, y1, elev1, x2, y2, elev2, x3, y3, elev3); if (elev > pOut[pixelIndex]) { pOut[pixelIndex] = elev; } } } } } } } } /* */ } __global__ void kernelFindDSMBlocksBbox(DTYPE* pX, DTYPE* pY, DTYPE* pZ, DTYPE* pBbox, int width, int height) { // Each block 
processes a DSM tile that is referenced by blockIdx int dsmStep = DSM_PPB_1D - 1; int rowTileOffset = blockIdx.y * dsmStep; int colTileOffset = blockIdx.x * dsmStep; // thread linear index within a block int linearThreadIdx = threadIdx.y * blockDim.x + threadIdx.x; __shared__ DTYPE cacheX0[TPB]; __shared__ DTYPE cacheX1[TPB]; __shared__ DTYPE cacheY0[TPB]; __shared__ DTYPE cacheY1[TPB]; // find thread-private local values // each thread is allowed to process up to DSM_PPB_1D pixels DTYPE localX0 = 1E10; DTYPE localY0 = 1E10; DTYPE localX1 = -1E10; DTYPE localY1 = -1E10; for (int i = 0; i < DSM_PPT_1D; ++i) { for (int j = 0; j < DSM_PPT_1D; ++j) { int y = rowTileOffset + threadIdx.y * DSM_PPT_1D + i; int x = colTileOffset + threadIdx.x * DSM_PPT_1D + j; if (y >= height || x >= width) { continue; } // global pixel index int pixelIdx = y * width + x; if (pZ[pixelIdx] < DSM_IGNORE_VALUE + 1) { continue; } localX0 = fminf(localX0, pX[pixelIdx]); localX1 = fmaxf(localX1, pX[pixelIdx]); localY0 = fminf(localY0, pY[pixelIdx]); localY1 = fmaxf(localY1, pY[pixelIdx]); } } cacheX0[linearThreadIdx] = localX0; cacheY0[linearThreadIdx] = localY0; cacheX1[linearThreadIdx] = localX1; cacheY1[linearThreadIdx] = localY1; __syncthreads(); // reduction op int threadsPerBlock = blockDim.x * blockDim.y; int i = threadsPerBlock / 2; while (i != 0) { if (linearThreadIdx < i) { cacheX0[linearThreadIdx] = fmin(cacheX0[linearThreadIdx], cacheX0[linearThreadIdx + i]); cacheY0[linearThreadIdx] = fmin(cacheY0[linearThreadIdx], cacheY0[linearThreadIdx + i]); cacheX1[linearThreadIdx] = fmax(cacheX1[linearThreadIdx], cacheX1[linearThreadIdx + i]); cacheY1[linearThreadIdx] = fmax(cacheY1[linearThreadIdx], cacheY1[linearThreadIdx + i]); } __syncthreads(); i /= 2; } if (linearThreadIdx == 0) { int linearBlockIdx = blockIdx.y * gridDim.x + blockIdx.x; pBbox[linearBlockIdx * 4 + 0] = cacheX0[0]; pBbox[linearBlockIdx * 4 + 1] = cacheY0[0]; pBbox[linearBlockIdx * 4 + 2] = cacheX1[0]; pBbox[linearBlockIdx * 4 + 3] = cacheY1[0]; } } void cudaRenderSatElevation(DTYPE * pX, DTYPE* pY, DTYPE* pZ, DTYPE* pOut, int dsm_width, int dsm_height, int sat_width, int sat_height) { int dsm_npixels = dsm_width * dsm_height; int sat_npixels = sat_width * sat_height; DTYPE* d_pX; DTYPE* d_pY; DTYPE* d_pZ; DTYPE* d_pOut; hipMalloc((void **)&d_pX, sizeof(DTYPE) * dsm_npixels); hipMalloc((void **)&d_pY, sizeof(DTYPE) * dsm_npixels); hipMalloc((void **)&d_pZ, sizeof(DTYPE) * dsm_npixels); hipMalloc((void **)&d_pOut, sizeof(DTYPE) * sat_npixels); hipMemcpy(d_pX, pX, sizeof(DTYPE) * dsm_npixels, hipMemcpyHostToDevice); hipMemcpy(d_pY, pY, sizeof(DTYPE) * dsm_npixels, hipMemcpyHostToDevice); hipMemcpy(d_pZ, pZ, sizeof(DTYPE) * dsm_npixels, hipMemcpyHostToDevice); // output memory on host contains all min values hipMemcpy(d_pOut, pOut, sizeof(DTYPE) * sat_npixels, hipMemcpyHostToDevice); // number of tiles: // DSM tiles overlap by 1 pixel, // because they are split into triangular faces int dsmStep = DSM_PPB_1D - 1; int dsmTiles_X = ((dsm_width - DSM_PPB_1D) + dsmStep - 1) / dsmStep + 1; int dsmTiles_Y = ((dsm_height - DSM_PPB_1D) + dsmStep - 1) / dsmStep + 1; int dsmTiles = dsmTiles_X * dsmTiles_Y; // tile bounding boxes DTYPE* d_pDSMBbox; hipMalloc((void **)&d_pDSMBbox, sizeof(DTYPE) * 4 * dsmTiles); // hipMemset(d_pDSMBbox, 0, sizeof(DTYPE) * 4 * dsmTiles); // blocks are linked to tiles, threads are linked to pixels dim3 dsmBlocks(dsmTiles_X, dsmTiles_Y); dim3 dsmThreadsPerBlock(TPB_1D, TPB_1D); hipLaunchKernelGGL(( kernelFindDSMBlocksBbox), 
dim3(dsmBlocks), dim3(dsmThreadsPerBlock), 0, 0, d_pX, d_pY, d_pZ, d_pDSMBbox, dsm_width, dsm_height ); hipDeviceSynchronize(); // hipDeviceSynchronize(); if ( hipSuccess != hipGetLastError() ) printf( "Error in CUDA kernel attempting to find DSM blocks bounding boxes!\n" ); // // check output: // DTYPE x0 = 1E10; // DTYPE y0 = 1E10; // DTYPE x1 = -1E10; // DTYPE y1 = -1E10; // int istart = 1600; // int jstart = 800; // for (int i = istart; i < istart+16; ++i) { // for (int j = jstart; j < jstart+16; ++j) { // int idx = i * width + j; // if (pX[idx] < x0) // x0 = pX[idx]; // if (pX[idx] > x1) // x1 = pX[idx]; // if (pY[idx] < y0) // y0 = pY[idx]; // if (pY[idx] > y1) // y1 = pY[idx]; // } // } // DTYPE tmp[4]; // hipMemcpy(tmp, d_pDSMBbox + 4 * ((istart/16)*dsmTiles_X + (jstart/16)), sizeof(DTYPE) * 4, hipMemcpyDeviceToHost); // printf("%d %d\n", dsmTiles_X, dsmTiles_Y); // printf("%.3f %.3f %.3f %.3f\n", tmp[0], tmp[1], tmp[2], tmp[3]); // printf("%.3f %.3f %.3f %.3f\n", x0, y0, x1, y1); /* int tmpStartTile = 0; for (int i = tmpStartTile; i < tmpStartTile + dsmTiles; ++i) { DTYPE tmp[4]; hipMemcpy(tmp, d_pDSMBbox + 4 * i, sizeof(DTYPE) * 4, hipMemcpyDeviceToHost); // int x = 28; bool flag = true; for (int y = 0; y < 32; ++y) { for (int x = 0; x < 32; ++x) { if (flag && (tmp[0] <= x) && (tmp[1] <= y) && (tmp[2] >= x) && (tmp[3] >= y)) { printf(">>>>>>>>>>>>>>> %d %d, %d\n", i, dsmTiles_X, dsmTiles_Y); flag = false; } }} // printf("%.1f %.1f %.1f %.1f\n", tmp[0], tmp[1], tmp[2], tmp[3]); } */ /* */ // hipMemcpy(pOut, d_pDSMBbox, sizeof(DTYPE) * 4 * dsmTiles, // hipMemcpyDeviceToHost); // printf("=========================== %d %.3f, %.3f\n", dsmTiles, pOut[0], pOut[1]); // return; // blocks per grid const int BPG_X = (sat_width + SAT_PPB_1D - 1) / SAT_PPB_1D; const int BPG_Y = (sat_height + SAT_PPB_1D - 1) / SAT_PPB_1D; dim3 satBlocks(BPG_X, BPG_Y); dim3 satThreadsPerBlock(TPB_1D, TPB_1D); // printf("%d %d %d %d %d %d\n", TPB_1D, TPB, SAT_PPT_1D, SAT_PPT, SAT_PPB_1D, SAT_PPB); // printf("============== %d %d\n", BPG_X, BPG_Y); hipLaunchKernelGGL(( kernelRenderSatElevation), dim3(satBlocks), dim3(satThreadsPerBlock), 0, 0, d_pX, d_pY, d_pZ, d_pOut, d_pDSMBbox, dsmTiles, dsmTiles_X, dsm_width, dsm_height, sat_width, sat_height); hipDeviceSynchronize(); // hipDeviceSynchronize(); if ( hipSuccess != hipGetLastError() ) printf( "Error in CUDA kernel attempting to render satellite elevation!\n" ); hipMemcpy(pOut, d_pOut, sizeof(DTYPE) * sat_npixels, hipMemcpyDeviceToHost); /* int tmpStartI = 0; int tmpStartJ = 0; for (int i = tmpStartI; i < tmpStartI + TPB_1D; ++i) { for (int j = tmpStartJ; j < tmpStartJ + TPB_1D; ++j) { printf("%.3f ", pOut[i * out_width + j]); } printf("\n"); } printf("\n"); DTYPE tmpMax = 0; DTYPE tmpAvg = 0; int tmpCount = 0; for (int i = 0; i < out_height * out_width; ++i) { tmpMax = fmaxf(tmpMax, pOut[i]); tmpAvg += pOut[i]; if (pOut[i] > 20) { tmpCount++; } } printf("%.1f\n", tmpMax); printf("%.1f\n", tmpAvg / (out_height * out_width)); printf("%d\n", tmpCount); */ }
c660f4df8cc41730aad1a21b5e01ac6b57c9d047.cu
/* We use a term *tile* to identify the rectangular submatrices of the image. Not to be confused with the blocks of threads. */ #include <cuda_runtime.h> #include <stdio.h> #define DSM_MAX_TILES_PER_BLOCK 500 #define DSM_MAX_TILES_PER_THREAD 500 // threads per block #define TPB_1D 16 #define TPB (TPB_1D * TPB_1D) // satellite pixels per thread #define SAT_PPT_1D 2 #define SAT_PPT (SAT_PPT_1D * SAT_PPT_1D) // satellite pixels per block #define SAT_PPB_1D (SAT_PPT_1D * TPB_1D) #define SAT_PPB (SAT_PPB_1D * SAT_PPB_1D) // DSM pixels per thread #define DSM_PPT_1D 1 #define DSM_PPT (DSM_PPT_1D * DSM_PPT_1D) // DSM pixels per block #define DSM_PPB_1D (DSM_PPT_1D * TPB_1D) // #define DSM_PPB (DSM_PPB_1D * DSM_PPB_1D) // this needs to be large negative number #define DSM_IGNORE_VALUE -1E5 // extern const float DSM_IGNORE_VALUE; #define EPS 1E-3 #define SCAN_BLOCK_DIM TPB #include "exclusiveScan.cu_inl" #define DTYPE float __device__ bool d_rectanglesIntersect(DTYPE* bbox1, DTYPE* bbox2) { if (bbox2[0] > bbox1[2] || bbox2[1] > bbox1[3] || bbox1[0] > bbox2[2] || bbox1[1] > bbox2[3]) { return false; } else { return true; } } __device__ DTYPE d_area(DTYPE x1, DTYPE y1, DTYPE x2, DTYPE y2, DTYPE x3, DTYPE y3) { return abs(x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2; } __device__ DTYPE d_interpolate_three(DTYPE x, DTYPE y, DTYPE x1, DTYPE y1, DTYPE v1, DTYPE x2, DTYPE y2, DTYPE v2, DTYPE x3, DTYPE y3, DTYPE v3) { DTYPE denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3); DTYPE w1 = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom; DTYPE w2 = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom; DTYPE w3 = 1. - w1 - w2; return (w1 * v1 + w2 * v2 + w3 * v3); } __device__ bool d_inside_triangle(DTYPE x, DTYPE y, DTYPE x1, DTYPE y1, DTYPE x2, DTYPE y2, DTYPE x3, DTYPE y3) { DTYPE A = d_area(x1, y1, x2, y2, x3, y3); DTYPE A1 = d_area(x, y, x1, y1, x2, y2); DTYPE A2 = d_area(x, y, x3, y3, x1, y1); DTYPE A3 = d_area(x, y, x2, y2, x3, y3); return (abs(A1 + A2 + A3 - A) < EPS); } __global__ void kernelRenderSatElevation( DTYPE* pX, DTYPE* pY, DTYPE* pZ, DTYPE* pOut, DTYPE* pTilesBboxes, int numDSMTiles, int numDSMTiles_X, int dsm_width, int dsm_height, int sat_width, int sat_height) { // One thread block processes one sattelite tile // linear thread index that is sorted into thread warps int linearThreadIdx = threadIdx.y * blockDim.x + threadIdx.x; // pixels being processed int satTileX0 = blockIdx.x * SAT_PPB_1D; int satTileY0 = blockIdx.y * SAT_PPB_1D; int satTileX1 = satTileX0 + SAT_PPB_1D - 1; int satTileY1 = satTileY0 + SAT_PPB_1D - 1; if (blockIdx.x == gridDim.x - 1) { satTileX1 = sat_width - 1; } if (blockIdx.y == gridDim.y - 1) { satTileY1 = sat_height - 1; } DTYPE satTileBbox[] = { static_cast<DTYPE>(satTileX0), static_cast<DTYPE>(satTileY0), static_cast<DTYPE>(satTileX1), static_cast<DTYPE>(satTileY1), }; __shared__ uint privateTileCount[TPB]; __shared__ uint accumPrivateTileCount[TPB]; // TODO: use tileIndex as scratch to save some memory __shared__ uint privateTileCountScratch[2 * TPB]; __shared__ uint tileIndex [DSM_MAX_TILES_PER_BLOCK]; int dsmTilesPerThread = (numDSMTiles + TPB - 1) / TPB; int dsmTilesStart = dsmTilesPerThread * linearThreadIdx; int dsmTilesEnd = dsmTilesStart + dsmTilesPerThread; // (linearThreadIdx == TPB - 1) condition is wrong, because here // we divide the array into TPB parts, as opposed to dividing // into parts of fixed size dsmTilesEnd = fminf(dsmTilesEnd, numDSMTiles); int numPrivateTiles = 0; uint privateTileList[DSM_MAX_TILES_PER_THREAD]; for (int i = 
dsmTilesStart; i < dsmTilesEnd; ++i) { if (d_rectanglesIntersect(pTilesBboxes + i * 4, satTileBbox)) privateTileList[numPrivateTiles++] = i; } privateTileCount[linearThreadIdx] = numPrivateTiles; __syncthreads(); sharedMemExclusiveScan(linearThreadIdx, privateTileCount, accumPrivateTileCount, privateTileCountScratch, TPB); __syncthreads(); // total number of DSM tiles that intersect with the current sat tile int numTiles = privateTileCount[TPB - 1] + accumPrivateTileCount[TPB - 1]; /* // TODO: debug for (int dx = 0; dx < SAT_PPT_1D; ++dx) { for (int dy = 0; dy < SAT_PPT_1D; ++dy) { int x = satTileX0 + SAT_PPT_1D * threadIdx.x + dx; int y = satTileY0 + SAT_PPT_1D * threadIdx.y + dy; if (x > sat_width - 1 || y > sat_height - 1) { continue; } int pixelIndex = y * sat_width + x; pOut[pixelIndex] = static_cast<DTYPE>(numTiles); } } int tmpIdx = (blockIdx.y * TPB_1D + threadIdx.y) * sat_width + blockIdx.x * TPB_1D + threadIdx.x; if (tmpIdx < sat_height * sat_width) { // pOut[tmpIdx] = static_cast<int>(accumPrivateTileCount[linearThreadIdx]); pOut[tmpIdx] = static_cast<DTYPE>(numTiles); // pOut[tmpIdx] = static_cast<DTYPE>(numPrivateTiles); } // end */ int curIndex = accumPrivateTileCount[linearThreadIdx]; for (int i = 0; i < numPrivateTiles; ++i) { tileIndex[curIndex++] = privateTileList[i]; } __syncthreads(); for (int iTile = 0; iTile < numTiles; ++iTile) { int dsmTileIndex = tileIndex[iTile]; int dsmTileX0 = (dsmTileIndex % numDSMTiles_X) * (DSM_PPB_1D - 1); int dsmTileY0 = (dsmTileIndex / numDSMTiles_X) * (DSM_PPB_1D - 1); int dsmTileX1 = dsmTileX0 + DSM_PPB_1D - 1; int dsmTileY1 = dsmTileY0 + DSM_PPB_1D - 1; if (dsmTileX1 > dsm_width - 2) { dsmTileX1 = dsm_width - 2; } if (dsmTileY1 > dsm_height - 2) { dsmTileY1 = dsm_height - 2; } for (int row_d = dsmTileY0; row_d <= dsmTileY1; ++row_d) { for (int col_d = dsmTileX0; col_d <= dsmTileX1; ++col_d) { int idx = row_d * dsm_width + col_d; for (int j = 0; j < 2; ++j) { DTYPE x1, y1, elev1, x2, y2, elev2, x3, y3, elev3; if (j == 0) { x1 = pX[idx] - satTileX0; y1 = pY[idx] - satTileY0; elev1 = pZ[idx]; x2 = pX[idx + 1] - satTileX0; y2 = pY[idx + 1] - satTileY0; elev2 = pZ[idx + 1]; x3 = pX[idx + dsm_width] - satTileX0; y3 = pY[idx + dsm_width] - satTileY0; elev3 = pZ[idx + dsm_width]; } else { // j == 1 x1 = pX[idx + 1] - satTileX0; y1 = pY[idx + 1] - satTileY0; elev1 = pZ[idx + 1]; x2 = pX[idx + dsm_width] - satTileX0; y2 = pY[idx + dsm_width] - satTileY0; elev2 = pZ[idx + dsm_width]; x3 = pX[idx + dsm_width + 1] - satTileX0; y3 = pY[idx + dsm_width + 1] - satTileY0; elev3 = pZ[idx + dsm_width + 1]; } // skip invalid faces if ((elev1 < DSM_IGNORE_VALUE + 1) || (elev2 < DSM_IGNORE_VALUE + 1) || (elev3 < DSM_IGNORE_VALUE + 1)) { continue; } for (int dx = 0; dx < SAT_PPT_1D; ++dx) { for (int dy = 0; dy < SAT_PPT_1D; ++dy) { int x = satTileX0 + SAT_PPT_1D * threadIdx.x + dx; int y = satTileY0 + SAT_PPT_1D * threadIdx.y + dy; if (x > sat_width - 1 || y > sat_height - 1) { continue; } int pixelIndex = y * sat_width + x; DTYPE fx = static_cast<DTYPE>(x) - satTileX0; DTYPE fy = static_cast<DTYPE>(y) - satTileY0; // if (d_inside_barycentric( if (d_inside_triangle( fx, fy, x1, y1, x2, y2, x3, y3)) { DTYPE elev = d_interpolate_three( fx, fy, x1, y1, elev1, x2, y2, elev2, x3, y3, elev3); if (elev > pOut[pixelIndex]) { pOut[pixelIndex] = elev; } } } } } } } } /* */ } __global__ void kernelFindDSMBlocksBbox(DTYPE* pX, DTYPE* pY, DTYPE* pZ, DTYPE* pBbox, int width, int height) { // Each block processes a DSM tile that is referenced by blockIdx int dsmStep = 
DSM_PPB_1D - 1; int rowTileOffset = blockIdx.y * dsmStep; int colTileOffset = blockIdx.x * dsmStep; // thread linear index within a block int linearThreadIdx = threadIdx.y * blockDim.x + threadIdx.x; __shared__ DTYPE cacheX0[TPB]; __shared__ DTYPE cacheX1[TPB]; __shared__ DTYPE cacheY0[TPB]; __shared__ DTYPE cacheY1[TPB]; // find thread-private local values // each thread is allowed to process up to DSM_PPB_1D pixels DTYPE localX0 = 1E10; DTYPE localY0 = 1E10; DTYPE localX1 = -1E10; DTYPE localY1 = -1E10; for (int i = 0; i < DSM_PPT_1D; ++i) { for (int j = 0; j < DSM_PPT_1D; ++j) { int y = rowTileOffset + threadIdx.y * DSM_PPT_1D + i; int x = colTileOffset + threadIdx.x * DSM_PPT_1D + j; if (y >= height || x >= width) { continue; } // global pixel index int pixelIdx = y * width + x; if (pZ[pixelIdx] < DSM_IGNORE_VALUE + 1) { continue; } localX0 = fminf(localX0, pX[pixelIdx]); localX1 = fmaxf(localX1, pX[pixelIdx]); localY0 = fminf(localY0, pY[pixelIdx]); localY1 = fmaxf(localY1, pY[pixelIdx]); } } cacheX0[linearThreadIdx] = localX0; cacheY0[linearThreadIdx] = localY0; cacheX1[linearThreadIdx] = localX1; cacheY1[linearThreadIdx] = localY1; __syncthreads(); // reduction op int threadsPerBlock = blockDim.x * blockDim.y; int i = threadsPerBlock / 2; while (i != 0) { if (linearThreadIdx < i) { cacheX0[linearThreadIdx] = fmin(cacheX0[linearThreadIdx], cacheX0[linearThreadIdx + i]); cacheY0[linearThreadIdx] = fmin(cacheY0[linearThreadIdx], cacheY0[linearThreadIdx + i]); cacheX1[linearThreadIdx] = fmax(cacheX1[linearThreadIdx], cacheX1[linearThreadIdx + i]); cacheY1[linearThreadIdx] = fmax(cacheY1[linearThreadIdx], cacheY1[linearThreadIdx + i]); } __syncthreads(); i /= 2; } if (linearThreadIdx == 0) { int linearBlockIdx = blockIdx.y * gridDim.x + blockIdx.x; pBbox[linearBlockIdx * 4 + 0] = cacheX0[0]; pBbox[linearBlockIdx * 4 + 1] = cacheY0[0]; pBbox[linearBlockIdx * 4 + 2] = cacheX1[0]; pBbox[linearBlockIdx * 4 + 3] = cacheY1[0]; } } void cudaRenderSatElevation(DTYPE * pX, DTYPE* pY, DTYPE* pZ, DTYPE* pOut, int dsm_width, int dsm_height, int sat_width, int sat_height) { int dsm_npixels = dsm_width * dsm_height; int sat_npixels = sat_width * sat_height; DTYPE* d_pX; DTYPE* d_pY; DTYPE* d_pZ; DTYPE* d_pOut; cudaMalloc((void **)&d_pX, sizeof(DTYPE) * dsm_npixels); cudaMalloc((void **)&d_pY, sizeof(DTYPE) * dsm_npixels); cudaMalloc((void **)&d_pZ, sizeof(DTYPE) * dsm_npixels); cudaMalloc((void **)&d_pOut, sizeof(DTYPE) * sat_npixels); cudaMemcpy(d_pX, pX, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice); cudaMemcpy(d_pY, pY, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice); cudaMemcpy(d_pZ, pZ, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice); // output memory on host contains all min values cudaMemcpy(d_pOut, pOut, sizeof(DTYPE) * sat_npixels, cudaMemcpyHostToDevice); // number of tiles: // DSM tiles overlap by 1 pixel, // because they are split into triangular faces int dsmStep = DSM_PPB_1D - 1; int dsmTiles_X = ((dsm_width - DSM_PPB_1D) + dsmStep - 1) / dsmStep + 1; int dsmTiles_Y = ((dsm_height - DSM_PPB_1D) + dsmStep - 1) / dsmStep + 1; int dsmTiles = dsmTiles_X * dsmTiles_Y; // tile bounding boxes DTYPE* d_pDSMBbox; cudaMalloc((void **)&d_pDSMBbox, sizeof(DTYPE) * 4 * dsmTiles); // cudaMemset(d_pDSMBbox, 0, sizeof(DTYPE) * 4 * dsmTiles); // blocks are linked to tiles, threads are linked to pixels dim3 dsmBlocks(dsmTiles_X, dsmTiles_Y); dim3 dsmThreadsPerBlock(TPB_1D, TPB_1D); kernelFindDSMBlocksBbox<<<dsmBlocks, dsmThreadsPerBlock>>>( d_pX, d_pY, d_pZ, d_pDSMBbox, dsm_width, 
dsm_height ); cudaThreadSynchronize(); // cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in CUDA kernel attempting to find DSM blocks bounding boxes!\n" ); // // check output: // DTYPE x0 = 1E10; // DTYPE y0 = 1E10; // DTYPE x1 = -1E10; // DTYPE y1 = -1E10; // int istart = 1600; // int jstart = 800; // for (int i = istart; i < istart+16; ++i) { // for (int j = jstart; j < jstart+16; ++j) { // int idx = i * width + j; // if (pX[idx] < x0) // x0 = pX[idx]; // if (pX[idx] > x1) // x1 = pX[idx]; // if (pY[idx] < y0) // y0 = pY[idx]; // if (pY[idx] > y1) // y1 = pY[idx]; // } // } // DTYPE tmp[4]; // cudaMemcpy(tmp, d_pDSMBbox + 4 * ((istart/16)*dsmTiles_X + (jstart/16)), sizeof(DTYPE) * 4, cudaMemcpyDeviceToHost); // printf("%d %d\n", dsmTiles_X, dsmTiles_Y); // printf("%.3f %.3f %.3f %.3f\n", tmp[0], tmp[1], tmp[2], tmp[3]); // printf("%.3f %.3f %.3f %.3f\n", x0, y0, x1, y1); /* int tmpStartTile = 0; for (int i = tmpStartTile; i < tmpStartTile + dsmTiles; ++i) { DTYPE tmp[4]; cudaMemcpy(tmp, d_pDSMBbox + 4 * i, sizeof(DTYPE) * 4, cudaMemcpyDeviceToHost); // int x = 28; bool flag = true; for (int y = 0; y < 32; ++y) { for (int x = 0; x < 32; ++x) { if (flag && (tmp[0] <= x) && (tmp[1] <= y) && (tmp[2] >= x) && (tmp[3] >= y)) { printf(">>>>>>>>>>>>>>> %d %d, %d\n", i, dsmTiles_X, dsmTiles_Y); flag = false; } }} // printf("%.1f %.1f %.1f %.1f\n", tmp[0], tmp[1], tmp[2], tmp[3]); } */ /* */ // cudaMemcpy(pOut, d_pDSMBbox, sizeof(DTYPE) * 4 * dsmTiles, // cudaMemcpyDeviceToHost); // printf("=========================== %d %.3f, %.3f\n", dsmTiles, pOut[0], pOut[1]); // return; // blocks per grid const int BPG_X = (sat_width + SAT_PPB_1D - 1) / SAT_PPB_1D; const int BPG_Y = (sat_height + SAT_PPB_1D - 1) / SAT_PPB_1D; dim3 satBlocks(BPG_X, BPG_Y); dim3 satThreadsPerBlock(TPB_1D, TPB_1D); // printf("%d %d %d %d %d %d\n", TPB_1D, TPB, SAT_PPT_1D, SAT_PPT, SAT_PPB_1D, SAT_PPB); // printf("============== %d %d\n", BPG_X, BPG_Y); kernelRenderSatElevation<<<satBlocks, satThreadsPerBlock>>>( d_pX, d_pY, d_pZ, d_pOut, d_pDSMBbox, dsmTiles, dsmTiles_X, dsm_width, dsm_height, sat_width, sat_height); cudaThreadSynchronize(); // cudaDeviceSynchronize(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in CUDA kernel attempting to render satellite elevation!\n" ); cudaMemcpy(pOut, d_pOut, sizeof(DTYPE) * sat_npixels, cudaMemcpyDeviceToHost); /* int tmpStartI = 0; int tmpStartJ = 0; for (int i = tmpStartI; i < tmpStartI + TPB_1D; ++i) { for (int j = tmpStartJ; j < tmpStartJ + TPB_1D; ++j) { printf("%.3f ", pOut[i * out_width + j]); } printf("\n"); } printf("\n"); DTYPE tmpMax = 0; DTYPE tmpAvg = 0; int tmpCount = 0; for (int i = 0; i < out_height * out_width; ++i) { tmpMax = fmaxf(tmpMax, pOut[i]); tmpAvg += pOut[i]; if (pOut[i] > 20) { tmpCount++; } } printf("%.1f\n", tmpMax); printf("%.1f\n", tmpAvg / (out_height * out_width)); printf("%d\n", tmpCount); */ }
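Both versions of kernelRenderSatElevation above build, for each satellite tile, a compacted list of the DSM tiles whose bounding boxes intersect it: every thread counts its own matches, sharedMemExclusiveScan (from exclusiveScan.cu_inl, which is not part of this dump) turns the per-thread counts into write offsets, and the matches are scattered into the shared tileIndex array, with the total recovered as the last thread's count plus its scanned offset. The sketch below isolates that count / scan / scatter idiom; the predicate, the naive Hillis-Steele scan, and the BLOCK constant are illustrative stand-ins rather than the project's actual helpers, and the kernel is assumed to be launched with exactly BLOCK threads in a single block.

// Minimal sketch of the count -> exclusive scan -> scatter compaction used above.
#define BLOCK 256

__global__ void compactMatches(const int* candidates, int numCandidates,
                               int* matches, int* matchCount) {
    __shared__ int count[BLOCK];   // per-thread number of accepted candidates
    __shared__ int offset[BLOCK];  // prefix sums of count
    int tid = threadIdx.x;

    // 1) each thread counts how many of its strided candidates pass the predicate
    int mine = 0;
    for (int i = tid; i < numCandidates; i += BLOCK)
        if (candidates[i] % 2 == 0) mine++;            // placeholder for d_rectanglesIntersect
    count[tid] = mine;
    __syncthreads();

    // 2) inclusive scan over the per-thread counts (naive Hillis-Steele)
    offset[tid] = count[tid];
    __syncthreads();
    for (int d = 1; d < BLOCK; d *= 2) {
        int v = (tid >= d) ? offset[tid - d] : 0;
        __syncthreads();
        offset[tid] += v;
        __syncthreads();
    }
    int myStart = offset[tid] - count[tid];            // exclusive offset for this thread

    // 3) scatter: each thread writes its survivors starting at its offset
    int cur = myStart;
    for (int i = tid; i < numCandidates; i += BLOCK)
        if (candidates[i] % 2 == 0) matches[cur++] = candidates[i];

    // total matches = last thread's exclusive offset + its count, as in the kernel above
    if (tid == BLOCK - 1) *matchCount = myStart + count[tid];
}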
f71e40907705272549f379829f3d2def71264869.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "DeltaKernel.cuh" __device__ double dfabs(double b1, double b2){ double result = b1 - b2; if (result > 0) return result; else return -result; } __device__ int ifabs(int b1, int b2){ int result = b1 - b2; if (result > 0) return result; else return -result; } __device__ int findAminoInPrior(char *amino, char aa, int size){ for (int i = 0; i < size; i++){ if (amino[i] == aa) return i; } return -1; } __device__ int findPtmIndex(int* numptm, int t){ int num = 0; for (int i = 0; i < t; i++){ num += numptm[i]; } return num; } __host__ void convert(vector<mass_t>& mass, vector<int>& massInt, vector<mass_t>& peaks, vector<int>& peaksInt, vector<mass_t>& delta, vector<int>& deltaInt){ int fold = 10000; for (int i = 0; i < mass.size(); i++){ massInt.push_back(mass[i] * fold); } for (int i = 0; i < peaks.size(); i++){ peaksInt.push_back(peaks[i] * fold); } for (int i = 0; i < delta.size(); i++){ deltaInt.push_back(delta[i] * fold); } } __host__ void calcDiff(vector<int>& massInt, vector<int>& peaksInt, vector<int>& diff){ int x = massInt.size(); int y = peaksInt.size(); for (int j = 0; j < y; j++){ for (int i = 0; i < x; i++){ int index = j*x + i; diff[index] = peaksInt[j] - massInt[i]; } } } __host__ void calcDiff(vector<double>& mass, vector<double>& peaks, vector<double>& diff){ int x = mass.size(); int y = peaks.size(); for (int j = 0; j < y; j++){ for (int i = 0; i < x; i++){ int index = j*x + i; diff[index] = peaks[j] - mass[i]; } } } __global__ void KernelInitialDeltaDiag2(int x, int y, int* diff, int* numDeltaCoords, int* deltaCoordsx, int* deltaCoordsy, int tolerance, int* delta, int deltaSize){ int idx = blockDim.x*blockIdx.x + threadIdx.x; //atomicAdd(&num, 1); int numDelta = 0; if (idx < x*y){ int currx = idx%x; int curry = idx / x; for (int j = 0; j < curry; j++){ for (int i = 0; i < currx; i++){ int index = j*x + i; int valueDelta = diff[idx] - diff[index]; //int ptmIndex = findPtmIndex(numptm, faa); if (valueDelta>0 && valueDelta < 800000){ for (int no_ptmi = 0; no_ptmi < deltaSize; no_ptmi++){ int toleranceDelta = ifabs(valueDelta, delta[no_ptmi]); if (toleranceDelta < tolerance){ deltaCoordsx[5 * idx + numDelta % 5] = i; deltaCoordsy[5 * idx + numDelta % 5] = j; numDelta++; break; } } } } //} } numDeltaCoords[idx] = numDelta; } } __global__ void KernelInitialDeltaDiag(int x, int y, double* diff, int* numDeltaCoords, int* deltaCoordsx, int* deltaCoordsy, double tolerance, double* delta, int deltaSize){ int idx = blockDim.x*blockIdx.x + threadIdx.x; //atomicAdd(&num, 1); int numDelta = 0; if (idx < x*y){ int currx = idx%x; int curry = idx / x; for (int j = 0; j < curry; j++){ for (int i = 0; i < currx; i++){ int index = j*x + i; double valueDelta = diff[idx] - diff[index]; /* keep double precision: truncating to int would defeat the fractional tolerance check */ //int ptmIndex = findPtmIndex(numptm, faa); if (valueDelta>0 && valueDelta < 80){ for (int no_ptmi = 0; no_ptmi < deltaSize; no_ptmi++){ double toleranceDelta = dfabs(valueDelta, delta[no_ptmi]); if (toleranceDelta < tolerance){ deltaCoordsx[5 * idx + numDelta % 5] = i; deltaCoordsy[5 * idx + numDelta % 5] = j; numDelta++; break; } } } } } numDeltaCoords[idx] = numDelta; } }
f71e40907705272549f379829f3d2def71264869.cu
#include "DeltaKernel.cuh" __device__ double dfabs(double b1, double b2){ double result = b1 - b2; if (result > 0) return result; else return -result; } __device__ int ifabs(int b1, int b2){ int result = b1 - b2; if (result > 0) return result; else return -result; } __device__ int findAminoInPrior(char *amino, char aa, int size){ for (int i = 0; i < size; i++){ if (amino[i] == aa) return i; } return -1; } __device__ int findPtmIndex(int* numptm, int t){ int num = 0; for (int i = 0; i < t; i++){ num += numptm[i]; } return num; } __host__ void convert(vector<mass_t>& mass, vector<int>& massInt, vector<mass_t>& peaks, vector<int>& peaksInt, vector<mass_t>& delta, vector<int>& deltaInt){ int fold = 10000; for (int i = 0; i < mass.size(); i++){ massInt.push_back(mass[i] * fold); } for (int i = 0; i < peaks.size(); i++){ peaksInt.push_back(peaks[i] * fold); } for (int i = 0; i < delta.size(); i++){ deltaInt.push_back(delta[i] * fold); } } __host__ void calcDiff(vector<int>& massInt, vector<int>& peaksInt, vector<int>& diff){ int x = massInt.size(); int y = peaksInt.size(); for (int j = 0; j < y; j++){ for (int i = 0; i < x; i++){ int index = j*x + i; diff[index] = peaksInt[j] - massInt[i]; } } } __host__ void calcDiff(vector<double>& mass, vector<double>& peaks, vector<double>& diff){ int x = mass.size(); int y = peaks.size(); for (int j = 0; j < y; j++){ for (int i = 0; i < x; i++){ int index = j*x + i; diff[index] = peaks[j] - mass[i]; } } } __global__ void KernelInitialDeltaDiag2(int x, int y, int* diff, int* numDeltaCoords, int* deltaCoordsx, int* deltaCoordsy, int tolerance, int* delta, int deltaSize){ int idx = blockDim.x*blockIdx.x + threadIdx.x; //atomicAdd(&num, 1); int numDelta = 0; if (idx < x*y){ int currx = idx%x; int curry = idx / x; for (int j = 0; j < curry; j++){ for (int i = 0; i < currx; i++){ int index = j*x + i; int valueDelta = diff[idx] - diff[index]; //int ptmIndex = findPtmIndex(numptm, faa); if (valueDelta>0 && valueDelta < 800000){ for (int no_ptmi = 0; no_ptmi < deltaSize; no_ptmi++){ int toleranceDelta = ifabs(valueDelta, delta[no_ptmi]); if (toleranceDelta < tolerance){ deltaCoordsx[5 * idx + numDelta % 5] = i; deltaCoordsy[5 * idx + numDelta % 5] = j; numDelta++; break; } } } } //} } numDeltaCoords[idx] = numDelta; } } __global__ void KernelInitialDeltaDiag(int x, int y, double* diff, int* numDeltaCoords, int* deltaCoordsx, int* deltaCoordsy, double tolerance, double* delta, int deltaSize){ int idx = blockDim.x*blockIdx.x + threadIdx.x; //atomicAdd(&num, 1); int numDelta = 0; if (idx < x*y){ int currx = idx%x; int curry = idx / x; for (int j = 0; j < curry; j++){ for (int i = 0; i < currx; i++){ int index = j*x + i; double valueDelta = diff[idx] - diff[index]; /* keep double precision: truncating to int would defeat the fractional tolerance check */ //int ptmIndex = findPtmIndex(numptm, faa); if (valueDelta>0 && valueDelta < 80){ for (int no_ptmi = 0; no_ptmi < deltaSize; no_ptmi++){ double toleranceDelta = dfabs(valueDelta, delta[no_ptmi]); if (toleranceDelta < tolerance){ deltaCoordsx[5 * idx + numDelta % 5] = i; deltaCoordsy[5 * idx + numDelta % 5] = j; numDelta++; break; } } } } } numDeltaCoords[idx] = numDelta; } }
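Neither file includes the host code that drives these kernels. The sketch below shows how the fixed-point path (convert, the integer calcDiff, KernelInitialDeltaDiag2) presumably fits together; runDeltaDiag2, the 256-thread launch configuration, and the buffer sizes are assumptions rather than part of the original project, and the integer tolerance must already be scaled by the same fold factor of 10000 that convert applies (a 0.01 Da window becomes 100).

#include <vector>

// Hypothetical driver for the fixed-point kernel above; assumes the declarations
// from DeltaKernel.cuh (calcDiff, KernelInitialDeltaDiag2) are visible.
void runDeltaDiag2(std::vector<int>& massInt, std::vector<int>& peaksInt,
                   std::vector<int>& deltaInt, int toleranceInt) {
    int x = massInt.size(), y = peaksInt.size();
    std::vector<int> diff(x * y);
    calcDiff(massInt, peaksInt, diff);                   // host-side difference matrix

    int *dDiff, *dDelta, *dNum, *dCx, *dCy;
    cudaMalloc((void**)&dDiff, x * y * sizeof(int));
    cudaMalloc((void**)&dDelta, deltaInt.size() * sizeof(int));
    cudaMalloc((void**)&dNum, x * y * sizeof(int));
    cudaMalloc((void**)&dCx, 5 * x * y * sizeof(int));   // kernel keeps up to 5 hits per cell
    cudaMalloc((void**)&dCy, 5 * x * y * sizeof(int));
    cudaMemcpy(dDiff, diff.data(), x * y * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dDelta, deltaInt.data(), deltaInt.size() * sizeof(int),
               cudaMemcpyHostToDevice);

    int threads = 256;                                   // illustrative choice
    int blocks = (x * y + threads - 1) / threads;        // one thread per diff cell
    KernelInitialDeltaDiag2<<<blocks, threads>>>(x, y, dDiff, dNum, dCx, dCy,
                                                 toleranceInt, dDelta,
                                                 (int)deltaInt.size());
    cudaDeviceSynchronize();
    // copy dNum / dCx / dCy back as needed, then cudaFree all five buffers
}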
430a587bc602c1846d37b17a1d8a5d76d33b3624.hip
// !!! This is a file automatically generated by hipify!!! #include "test_d.cuh" test_d::test_d(test t) : anwser(t.anwser) { hipMalloc(&features, t.features.size() * sizeof(double)); hipMemcpy(features, &t.features[0], t.features.size() * sizeof(double), hipMemcpyHostToDevice); } //test_d::~test_d() //{ // hipFree(features); //}
430a587bc602c1846d37b17a1d8a5d76d33b3624.cu
#include "test_d.cuh" test_d::test_d(test t) : anwser(t.anwser) { cudaMalloc(&features, t.features.size() * sizeof(double)); cudaMemcpy(features, &t.features[0], t.features.size() * sizeof(double), cudaMemcpyHostToDevice); } //test_d::~test_d() //{ // cudaFree(features); //}
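In both versions of test_d the destructor that would free the device buffer is commented out, presumably so that by-value copies of the object do not release memory that another copy (or a running kernel) is still using; the trade-off is that features is never freed. A minimal sketch of an explicit release helper follows, assuming features is an accessible member and the caller knows which copy owns the allocation; releaseTestFeatures is not part of the original class.

// Hypothetical helper, not part of test_d: free the device buffer of the owning
// copy once nothing is still reading it, since the destructor above is disabled.
void releaseTestFeatures(test_d& t) {
    if (t.features != nullptr) {
        cudaFree(t.features);
        t.features = nullptr;
    }
}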
5f645eca6bbdd2314428a0065a5d279e03811978.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include "dimensions.h" __global__ void checkIndex(int funcId) { /*printf("threadIdx:(%2d, %2d, %2d) blockIdx:(%2d, %2d, %2d) blockDim:(%2d, %2d, %2d) " "gridDim:(%2d, %2d, %2d) -> id: %2d\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z, getGlobalIdFunc[funcId]());*/ printf("gridDim:(%2d, %2d, %2d) blockDim:(%2d, %2d, %2d) blockIdx:(%2d, %2d, %2d) " "threadIdx:(%2d, %2d, %2d) -> id: %2d\n", gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z, blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdFunc[funcId]()); } int main(int argc, char **argv) { if (argc != 8) { printf("Uso: %s <g.x> <g.y> <g.z> <b.x> <b.y> <b.z> <gpuId>\n", argv[0]); return 0; } /* Definição do arranjo de threads em blocos do grid. */ int gx = atoi(argv[1]); int gy = atoi(argv[2]); int gz = atoi(argv[3]); int bx = atoi(argv[4]); int by = atoi(argv[5]); int bz = atoi(argv[6]); dim3 grid(gx, gy, gz); dim3 block(bx, by, bz); printf("config(gx: %d, gy: %d, gz: %d, bx: %d, by: %d, bz: %d)\n", grid.x, grid.y, grid.z, block.x, block.y, block.z); /* grid(gx,gy,gx) block(bx,by,bz) funcId é escolhida com base nos valores de [gx,gy,gx,bx,by,bz] Cada valor irá contribuir com uma parcela para o cálculo do índice da função: [gx > 1, gy > 1, gx > 1, bx > 1, by > 1, bz > 1] Exemplo: grid(32,1,1) block(32,1,1) [1,0,0,1,0,0] -> [32,16,8,4,2,1] = [32 + 4] = 36 A função getGlobalIdFunc(36) será: // 36: 100 100 getGlobalIdx_grid_1D_x_block_1D_x */ int funcId = calculateFunctionId(grid, block); printf("funcId: %d\n", funcId); int gpuId = atoi(argv[7]); /* Define the gpu id to work */ hipSetDevice(gpuId); // check grid and block dimension from host side printf("config(gx: %d, gy: %d, gz: %d, bx: %d, by: %d, bz: %d)\n", grid.x, grid.y, grid.z, block.x, block.y, block.z); printf("gridDim:( x, y, z) blockDim:( x, y, z) blockIdx:( x, y, z) threadIdx:( x, y, z)\n"); // check grid and block dimension from device side hipLaunchKernelGGL(( checkIndex), dim3(grid), dim3(block), 0, 0, funcId); // reset device before you leave hipDeviceReset(); return (0); }
5f645eca6bbdd2314428a0065a5d279e03811978.cu
#include <cuda_runtime.h> #include <stdio.h> #include "dimensions.h" __global__ void checkIndex(int funcId) { /*printf("threadIdx:(%2d, %2d, %2d) blockIdx:(%2d, %2d, %2d) blockDim:(%2d, %2d, %2d) " "gridDim:(%2d, %2d, %2d) -> id: %2d\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z, getGlobalIdFunc[funcId]());*/ printf("gridDim:(%2d, %2d, %2d) blockDim:(%2d, %2d, %2d) blockIdx:(%2d, %2d, %2d) " "threadIdx:(%2d, %2d, %2d) -> id: %2d\n", gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z, blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdFunc[funcId]()); } int main(int argc, char **argv) { if (argc != 8) { printf("Uso: %s <g.x> <g.y> <g.z> <b.x> <b.y> <b.z> <gpuId>\n", argv[0]); return 0; } /* Definição do arranjo de threads em blocos do grid. */ int gx = atoi(argv[1]); int gy = atoi(argv[2]); int gz = atoi(argv[3]); int bx = atoi(argv[4]); int by = atoi(argv[5]); int bz = atoi(argv[6]); dim3 grid(gx, gy, gz); dim3 block(bx, by, bz); printf("config(gx: %d, gy: %d, gz: %d, bx: %d, by: %d, bz: %d)\n", grid.x, grid.y, grid.z, block.x, block.y, block.z); /* grid(gx,gy,gx) block(bx,by,bz) funcId é escolhida com base nos valores de [gx,gy,gx,bx,by,bz] Cada valor irá contribuir com uma parcela para o cálculo do índice da função: [gx > 1, gy > 1, gx > 1, bx > 1, by > 1, bz > 1] Exemplo: grid(32,1,1) block(32,1,1) [1,0,0,1,0,0] -> [32,16,8,4,2,1] = [32 + 4] = 36 A função getGlobalIdFunc(36) será: // 36: 100 100 getGlobalIdx_grid_1D_x_block_1D_x */ int funcId = calculateFunctionId(grid, block); printf("funcId: %d\n", funcId); int gpuId = atoi(argv[7]); /* Define the gpu id to work */ cudaSetDevice(gpuId); // check grid and block dimension from host side printf("config(gx: %d, gy: %d, gz: %d, bx: %d, by: %d, bz: %d)\n", grid.x, grid.y, grid.z, block.x, block.y, block.z); printf("gridDim:( x, y, z) blockDim:( x, y, z) blockIdx:( x, y, z) threadIdx:( x, y, z)\n"); // check grid and block dimension from device side checkIndex<<<grid, block>>>(funcId); // reset device before you leave cudaDeviceReset(); return (0); }
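calculateFunctionId and getGlobalIdFunc are declared in dimensions.h, which is not part of this dump. Based on the comment block in main (each of gx, gy, gz, bx, by, bz greater than one contributes 32, 16, 8, 4, 2, 1 to the index), the selection presumably works like the sketch below, which reproduces the documented example grid(32,1,1), block(32,1,1) -> 36; the real definition may differ.

// Hypothetical reconstruction of calculateFunctionId from the comment above;
// the actual implementation lives in dimensions.h.
int calculateFunctionId(dim3 grid, dim3 block) {
    int id = 0;
    if (grid.x > 1)  id += 32;
    if (grid.y > 1)  id += 16;
    if (grid.z > 1)  id += 8;
    if (block.x > 1) id += 4;
    if (block.y > 1) id += 2;
    if (block.z > 1) id += 1;
    return id;  // grid(32,1,1), block(32,1,1) -> 32 + 4 = 36
}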
d5e2495b6849652ff6f944f84d7f7ec64a420880.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * \file * \brief * \author */ #include <stdio.h> #include <math.h> #include "./nms_3d-inl.h" #include "../../common/cuda_utils.h" namespace mxnet { namespace op { #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) #define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } //#define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } __device__ float norm(){ return sqrt(x * x + y * y); } __device__ Point operator +(const Point &b)const{ return Point(x + b.x, y + b.y); } __device__ Point operator -(const Point &b)const{ return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b){ return a.x * b.y - a.y * b.x; } __device__ inline float inner_product(const Point &A, const Point &B, const Point &C){ // vector:AB * vector:AC Point AB = B - A; Point AC = C - A; return AB.x * AC.x + AB.y * AC.y; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross_3d(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && min(q1.x,q2.x) <= max(p1.x,p2.x) && min(p1.y,p2.y) <= max(q1.y,q2.y) && min(q1.y,q2.y) <= max(p1.y,p2.y); return ret; } __device__ inline int check_in_box3d(const float *box, const Point &P){ // const float MARGIN = 1e-5; // 4 points: ABCD Point A(box[0],box[1]); Point B(box[2],box[3]); Point C(box[4],box[5]); Point D(box[6],box[7]); float dot1 = inner_product(B,A,P); if (dot1 < 0) return false; float dot2 = inner_product(B,C,P); if (dot2 < 0) return false; float dot3 = inner_product(D,A,P); if (dot3 < 0) return false; float dot4 = inner_product(D,C,P); if (dot4 < 0) return false; return true; } __device__ inline int check_in_box3d_anotherway(const float *box, const Point &P){ const float MARGIN = -1e-2; // 4 points: ABCD Point A(box[0],box[1]); Point B(box[2],box[3]); Point C(box[4],box[5]); Point D(box[6],box[7]); Point AB_vec = B - A; Point BC_vec = C - B; Point CD_vec = D - C; Point DA_vec = A - D; auto is_clock_wise = cross(AB_vec,BC_vec); #ifdef DEBUG printf("AB_vec: (%f, %f)\n", AB_vec.x, AB_vec.y); printf("BC_vec: (%f, %f)\n", BC_vec.x, BC_vec.y); printf("CD_vec: (%f, %f)\n", CD_vec.x, CD_vec.y); printf("DA_vec: (%f, %f)\n", DA_vec.x, DA_vec.y); printf("is_clock_wise: %f\n", is_clock_wise); #endif Point PA_vec = A - P; float cross1 = cross(PA_vec, AB_vec); if (cross1 * is_clock_wise < MARGIN) { #ifdef DEBUG printf("cross1: %f, PA.x: %f, PA.y: %f\n", cross1, PA_vec.x, PA_vec.y); #endif return false; } Point PB_vec = B - P; float cross2 = cross(PB_vec, BC_vec); if (cross2 * is_clock_wise < MARGIN){ #ifdef DEBUG printf("cross2: %f, PB.x: %f, PB.y: %f\n", cross2, PB_vec.x, PB_vec.y); #endif return false; } Point PC_vec = C - P; float cross3 = cross(PC_vec, CD_vec); if (cross3 * is_clock_wise < MARGIN) { #ifdef DEBUG printf("cross3: %f, PC.x: %f, PC.y: %f\n", cross3, PC_vec.x, PC_vec.y); #endif return false; } Point PD_vec = D - P; float cross4 = 
cross(PD_vec, DA_vec); if (cross4 * is_clock_wise < MARGIN) { #ifdef DEBUG printf("cross4: %f, PD.x: %f, PD.y: %f\n", cross4, PD_vec.x, PD_vec.y); #endif return false; } return true; } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ // fast exclusion if (check_rect_cross_3d(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if(fabs(s5 - s1) > EPS){ ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else{ float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){ float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x; float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){ return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float get_area(const float* box){ float x1 = box[0], y1 = box[1], x2 = box[2], y2 = box[3], x3 = box[4], y3 = box[5]; float edge1 = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2); float edge2 = (x3 - x2) * (x3 - x2) + (y3 - y2) * (y3 - y2); return sqrt(edge1 * edge2); } __device__ inline float max4(const float x1, const float x2, const float x3, const float x4){ float max = -1000000; if (x1 > max) max = x1; if (x2 > max) max = x2; if (x3 > max) max = x3; if (x4 > max) max = x4; return max; } __device__ inline float min4(const float x1, const float x2, const float x3, const float x4){ float min = 1000000; if (x1 < min) min = x1; if (x2 < min) min = x2; if (x3 < min) min = x3; if (x4 < min) min = x4; return min; } __device__ inline float box_overlap(const float *box_a, const float *box_b){ // float x_a_min = min4(box_a[0], box_a[2], box_a[4], box_a[6]); // float x_a_max = max4(box_a[0], box_a[2], box_a[4], box_a[6]); // float y_a_min = min4(box_a[1], box_a[3], box_a[5], box_a[7]); // float y_a_max = max4(box_a[1], box_a[3], box_a[5], box_a[7]); // float x_b_min = min4(box_b[0], box_b[2], box_b[4], box_b[6]); // float x_b_max = max4(box_b[0], box_b[2], box_b[4], box_b[6]); // float y_b_min = min4(box_b[1], box_b[3], box_b[5], box_b[7]); // float y_b_max = max4(box_b[1], box_b[3], box_b[5], box_b[7]); // if (x_a_max < x_b_min || x_a_min > x_b_max || y_a_max < y_b_min || y_a_min > y_b_max) return 0; // Point center_a; // center_a.set((box_a[0] + box_a[2] + box_a[4] + box_a[6]) / 4.0, // (box_a[1] + box_a[3] + box_a[5] + box_a[7]) / 4.0); // // printf("center_a:(%f, %f)\n", center_a.x, center_a.y); // Point center_b; // center_b.set((box_b[0] + box_b[2] + box_b[4] + box_b[6]) / 4.0, // (box_b[1] + box_b[3] + box_b[5] + box_b[7]) / 4.0); // // printf("center_b:(%f, %f)\n", center_b.x, center_b.y); // Point two_center_vec = center_a - center_b; // // printf("two_center_vec:(%f, %f)\n", two_center_vec.x, two_center_vec.y); // float center_dist = two_center_vec.norm(); // // printf("center_dist:%f\n", 
center_dist); // float area_a = get_area(box_a); // float area_b = get_area(box_b); // float min_area = area_a < area_b ? area_a : area_b; // if (center_dist < 0.2){ // return min_area; // } // else return 0; Point box_a_corners[5]; box_a_corners[0].set(box_a[0],box_a[1]); box_a_corners[1].set(box_a[2],box_a[3]); box_a_corners[2].set(box_a[4],box_a[5]); box_a_corners[3].set(box_a[6],box_a[7]); Point box_b_corners[5]; box_b_corners[0].set(box_b[0],box_b[1]); box_b_corners[1].set(box_b[2],box_b[3]); box_b_corners[2].set(box_b[4],box_b[5]); box_b_corners[3].set(box_b[6],box_b[7]); box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag){ poly_center = poly_center + cross_points[cnt]; #ifdef DEBUG printf("Intersect point (%f, %f)\n", cross_points[cnt].x, cross_points[cnt].y); #endif cnt++; } } } // check corners for (int k = 0; k < 4; k++){ if (check_in_box3d_anotherway(box_a, box_b_corners[k])){ poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("Point (%f, %f) in box_a\n", box_b_corners[k].x, box_b_corners[k].y); #endif } if (check_in_box3d_anotherway(box_b, box_a_corners[k])){ poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("Point (%f, %f) in box_b\n", box_a_corners[k].x, box_a_corners[k].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++){ for (int i = 0; i < cnt - j - 1; i++){ if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); auto thread_id = threadIdx.x; for (int i = 0; i < cnt; i++){ printf("thread: %d, All cross point %d: (%.3f, %.3f)\n", thread_id, i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++){ area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { float height_a = box_a[9] - box_a[8]; float height_b = box_b[9] - box_b[8]; float overlap_height = fminf(box_a[9], box_b[9]) - fmaxf(box_a[8], box_b[8]); if (overlap_height < 0) overlap_height = 0; float area_a = get_area(box_a); float area_b = get_area(box_b); float volume_a = area_a * height_a; float volume_b = area_b * height_b; float overlap_2d = box_overlap(box_a, box_b); float volume_overlap = overlap_2d * overlap_height; float result = volume_overlap / fmaxf(volume_a + volume_b - volume_overlap, EPS); #ifdef DEBUG printf("area_a=%f\n", area_a); printf("area_b=%f\n", area_b); printf("height_a=%f\n", height_a); printf("height_b=%f\n", height_b); printf("overlap_height=%f\n", overlap_height); printf("volume_a=%f\n", volume_a); printf("volume_b=%f\n", volume_b); printf("overlap_2d=%f\n", overlap_2d); printf("volume_overlap=%f\n", volume_overlap); printf("overlap result=%f\n", result); #endif return result; } __device__ inline float iou_normal(float const * const a, float const * const b) { float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); float top 
= fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = (a[2] - a[0]) * (a[3] - a[1]); float Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_kernel_3d(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask, bool normal_iou) { //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 10]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 10 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 0]; block_boxes[threadIdx.x * 10 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 1]; block_boxes[threadIdx.x * 10 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 2]; block_boxes[threadIdx.x * 10 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 3]; block_boxes[threadIdx.x * 10 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 4]; block_boxes[threadIdx.x * 10 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 5]; block_boxes[threadIdx.x * 10 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 6]; block_boxes[threadIdx.x * 10 + 7] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 7]; block_boxes[threadIdx.x * 10 + 8] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 8]; block_boxes[threadIdx.x * 10 + 9] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 9]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 10; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { float iou_st; if(normal_iou){ iou_st = iou_normal(cur_box, block_boxes + i * 10); } else{ iou_st = iou_bev(cur_box, block_boxes + i * 10); } if (iou_st > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __global__ void prepare_output_kernel_3d(const int N, const int max_keep, const int col_blocks, unsigned long long *mask, unsigned long long * remv_cpu, int* keep_idx, const float *boxes, float *bbox_after_nms) { // unsigned long long remv_cpu[col_blocks]; // memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); int num_to_keep = 0; for (int i = 0; i < N; i++) { if(num_to_keep >= max_keep) {break;} int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; // if (!(remv_cpu[nblock] & (1ULL << inblock))) { // keep_idx[num_to_keep++] = i; // unsigned long long *p = &mask[0] + i * col_blocks; // for (int j = nblock; j < col_blocks; j++) { // remv_cpu[j] |= p[j]; // } // } if (!(remv_cpu[nblock] & (1ULL << inblock))) { for (int k = 0; k < 10; k++){ bbox_after_nms[num_to_keep * 10 + k] = boxes[i * 10 + k]; } keep_idx[num_to_keep++] = i; unsigned long long *p = &mask[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } } template <> void 
NMS3DForward<gpu>(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& in_data, const std::vector<OpReqType>& req, const std::vector<TBlob>& out_data) { using namespace mshadow; size_t expected_in = 1; size_t expected_out = 2; // input: boxes(B,N,10), which is sorted with score // output: keep_idx(B,num_boxes) CHECK_EQ(in_data.size(), expected_in); CHECK_EQ(out_data.size(), expected_out); CHECK_EQ(in_data[0].shape_[2], 10); CHECK_EQ(out_data[0].shape_[0], in_data[0].shape_[0]); const NMS3DParam param = nnvm::get<NMS3DParam>(attrs.parsed); const int B = in_data[0].size(0); const int N = in_data[0].size(1); const int max_keep = param.max_keep; const float iou_thres = param.iou_thres; const bool normal_iou = param.normal_iou; CHECK_EQ(out_data[0].shape_[1], max_keep); Stream<gpu>* s = ctx.get_stream<gpu>(); auto stream = mshadow::Stream<gpu>::GetStream(s); // assume all the data and gradient have the same type MSHADOW_TYPE_SWITCH(in_data[0].type_flag_, DType, { const float* boxes = in_data[0].dptr<float>(); int* keep_idx = out_data[0].dptr<int>(); float* bbox_after_nms = out_data[1].dptr<float>(); Fill<true>(s, out_data[0], kWriteTo, -1); Fill<true>(s, out_data[1], kWriteTo, 0.0); const int col_blocks = DIVUP(N, THREADS_PER_BLOCK_NMS); unsigned long long *mask_data = NULL; CHECK_ERROR(hipMalloc((void**)&mask_data, B * N * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(N, THREADS_PER_BLOCK_NMS), DIVUP(N, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); unsigned long long *remv_dev = NULL; CHECK_ERROR(hipMalloc((void**)&remv_dev, col_blocks * sizeof(unsigned long long))); // iterate through batch for(int b = 0; b < B; b++) { // calculate overlap matrix hipLaunchKernelGGL(( nms_kernel_3d), dim3(blocks), dim3(threads), 0, 0, N, iou_thres, boxes+b*N*10, mask_data+b*N*col_blocks, normal_iou); CHECK_ERROR(hipMemset(remv_dev, 0, col_blocks * sizeof(unsigned long long))); hipLaunchKernelGGL(( prepare_output_kernel_3d), dim3(1),dim3(1),0,stream, N, max_keep, col_blocks, mask_data+b*N*col_blocks, remv_dev, keep_idx + b * max_keep, boxes + b * N * 10, bbox_after_nms + b * max_keep * 10); hipError_t err = hipGetLastError(); if (hipSuccess != err) { LOG(FATAL) << "CUDA kernel failed : " << hipGetErrorString(err); exit(-1); } } hipFree(mask_data); }); } NNVM_REGISTER_OP(_contrib_NMS3D) .set_attr<FCompute>("FCompute<gpu>", NMS3DForward<gpu>); } }
d5e2495b6849652ff6f944f84d7f7ec64a420880.cu
/*! * \file * \brief * \author */ #include <stdio.h> #include <math.h> #include "./nms_3d-inl.h" #include "../../common/cuda_utils.h" namespace mxnet { namespace op { #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) #define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } //#define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } __device__ float norm(){ return sqrt(x * x + y * y); } __device__ Point operator +(const Point &b)const{ return Point(x + b.x, y + b.y); } __device__ Point operator -(const Point &b)const{ return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b){ return a.x * b.y - a.y * b.x; } __device__ inline float inner_product(const Point &A, const Point &B, const Point &C){ // vector:AB * vector:AC Point AB = B - A; Point AC = C - A; return AB.x * AC.x + AB.y * AC.y; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross_3d(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && min(q1.x,q2.x) <= max(p1.x,p2.x) && min(p1.y,p2.y) <= max(q1.y,q2.y) && min(q1.y,q2.y) <= max(p1.y,p2.y); return ret; } __device__ inline int check_in_box3d(const float *box, const Point &P){ // const float MARGIN = 1e-5; // 4 points: ABCD Point A(box[0],box[1]); Point B(box[2],box[3]); Point C(box[4],box[5]); Point D(box[6],box[7]); float dot1 = inner_product(B,A,P); if (dot1 < 0) return false; float dot2 = inner_product(B,C,P); if (dot2 < 0) return false; float dot3 = inner_product(D,A,P); if (dot3 < 0) return false; float dot4 = inner_product(D,C,P); if (dot4 < 0) return false; return true; } __device__ inline int check_in_box3d_anotherway(const float *box, const Point &P){ const float MARGIN = -1e-2; // 4 points: ABCD Point A(box[0],box[1]); Point B(box[2],box[3]); Point C(box[4],box[5]); Point D(box[6],box[7]); Point AB_vec = B - A; Point BC_vec = C - B; Point CD_vec = D - C; Point DA_vec = A - D; auto is_clock_wise = cross(AB_vec,BC_vec); #ifdef DEBUG printf("AB_vec: (%f, %f)\n", AB_vec.x, AB_vec.y); printf("BC_vec: (%f, %f)\n", BC_vec.x, BC_vec.y); printf("CD_vec: (%f, %f)\n", CD_vec.x, CD_vec.y); printf("DA_vec: (%f, %f)\n", DA_vec.x, DA_vec.y); printf("is_clock_wise: %f\n", is_clock_wise); #endif Point PA_vec = A - P; float cross1 = cross(PA_vec, AB_vec); if (cross1 * is_clock_wise < MARGIN) { #ifdef DEBUG printf("cross1: %f, PA.x: %f, PA.y: %f\n", cross1, PA_vec.x, PA_vec.y); #endif return false; } Point PB_vec = B - P; float cross2 = cross(PB_vec, BC_vec); if (cross2 * is_clock_wise < MARGIN){ #ifdef DEBUG printf("cross2: %f, PB.x: %f, PB.y: %f\n", cross2, PB_vec.x, PB_vec.y); #endif return false; } Point PC_vec = C - P; float cross3 = cross(PC_vec, CD_vec); if (cross3 * is_clock_wise < MARGIN) { #ifdef DEBUG printf("cross3: %f, PC.x: %f, PC.y: %f\n", cross3, PC_vec.x, PC_vec.y); #endif return false; } Point PD_vec = D - P; float cross4 = cross(PD_vec, DA_vec); if (cross4 * is_clock_wise < MARGIN) { #ifdef DEBUG 
printf("cross4: %f, PD.x: %f, PD.y: %f\n", cross4, PD_vec.x, PD_vec.y); #endif return false; } return true; } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ // fast exclusion if (check_rect_cross_3d(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if(fabs(s5 - s1) > EPS){ ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else{ float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){ float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x; float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){ return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float get_area(const float* box){ float x1 = box[0], y1 = box[1], x2 = box[2], y2 = box[3], x3 = box[4], y3 = box[5]; float edge1 = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2); float edge2 = (x3 - x2) * (x3 - x2) + (y3 - y2) * (y3 - y2); return sqrt(edge1 * edge2); } __device__ inline float max4(const float x1, const float x2, const float x3, const float x4){ float max = -1000000; if (x1 > max) max = x1; if (x2 > max) max = x2; if (x3 > max) max = x3; if (x4 > max) max = x4; return max; } __device__ inline float min4(const float x1, const float x2, const float x3, const float x4){ float min = 1000000; if (x1 < min) min = x1; if (x2 < min) min = x2; if (x3 < min) min = x3; if (x4 < min) min = x4; return min; } __device__ inline float box_overlap(const float *box_a, const float *box_b){ // float x_a_min = min4(box_a[0], box_a[2], box_a[4], box_a[6]); // float x_a_max = max4(box_a[0], box_a[2], box_a[4], box_a[6]); // float y_a_min = min4(box_a[1], box_a[3], box_a[5], box_a[7]); // float y_a_max = max4(box_a[1], box_a[3], box_a[5], box_a[7]); // float x_b_min = min4(box_b[0], box_b[2], box_b[4], box_b[6]); // float x_b_max = max4(box_b[0], box_b[2], box_b[4], box_b[6]); // float y_b_min = min4(box_b[1], box_b[3], box_b[5], box_b[7]); // float y_b_max = max4(box_b[1], box_b[3], box_b[5], box_b[7]); // if (x_a_max < x_b_min || x_a_min > x_b_max || y_a_max < y_b_min || y_a_min > y_b_max) return 0; // Point center_a; // center_a.set((box_a[0] + box_a[2] + box_a[4] + box_a[6]) / 4.0, // (box_a[1] + box_a[3] + box_a[5] + box_a[7]) / 4.0); // // printf("center_a:(%f, %f)\n", center_a.x, center_a.y); // Point center_b; // center_b.set((box_b[0] + box_b[2] + box_b[4] + box_b[6]) / 4.0, // (box_b[1] + box_b[3] + box_b[5] + box_b[7]) / 4.0); // // printf("center_b:(%f, %f)\n", center_b.x, center_b.y); // Point two_center_vec = center_a - center_b; // // printf("two_center_vec:(%f, %f)\n", two_center_vec.x, two_center_vec.y); // float center_dist = two_center_vec.norm(); // // printf("center_dist:%f\n", center_dist); // float area_a = get_area(box_a); // float area_b = 
get_area(box_b); // float min_area = area_a < area_b ? area_a : area_b; // if (center_dist < 0.2){ // return min_area; // } // else return 0; Point box_a_corners[5]; box_a_corners[0].set(box_a[0],box_a[1]); box_a_corners[1].set(box_a[2],box_a[3]); box_a_corners[2].set(box_a[4],box_a[5]); box_a_corners[3].set(box_a[6],box_a[7]); Point box_b_corners[5]; box_b_corners[0].set(box_b[0],box_b[1]); box_b_corners[1].set(box_b[2],box_b[3]); box_b_corners[2].set(box_b[4],box_b[5]); box_b_corners[3].set(box_b[6],box_b[7]); box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag){ poly_center = poly_center + cross_points[cnt]; #ifdef DEBUG printf("Intersect point (%f, %f)\n", cross_points[cnt].x, cross_points[cnt].y); #endif cnt++; } } } // check corners for (int k = 0; k < 4; k++){ if (check_in_box3d_anotherway(box_a, box_b_corners[k])){ poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("Point (%f, %f) in box_a\n", box_b_corners[k].x, box_b_corners[k].y); #endif } if (check_in_box3d_anotherway(box_b, box_a_corners[k])){ poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("Point (%f, %f) in box_b\n", box_a_corners[k].x, box_a_corners[k].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++){ for (int i = 0; i < cnt - j - 1; i++){ if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); auto thread_id = threadIdx.x; for (int i = 0; i < cnt; i++){ printf("thread: %d, All cross point %d: (%.3f, %.3f)\n", thread_id, i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++){ area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { float height_a = box_a[9] - box_a[8]; float height_b = box_b[9] - box_b[8]; float overlap_height = fminf(box_a[9], box_b[9]) - fmaxf(box_a[8], box_b[8]); if (overlap_height < 0) overlap_height = 0; float area_a = get_area(box_a); float area_b = get_area(box_b); float volume_a = area_a * height_a; float volume_b = area_b * height_b; float overlap_2d = box_overlap(box_a, box_b); float volume_overlap = overlap_2d * overlap_height; float result = volume_overlap / fmaxf(volume_a + volume_b - volume_overlap, EPS); #ifdef DEBUG printf("area_a=%f\n", area_a); printf("area_b=%f\n", area_b); printf("height_a=%f\n", height_a); printf("height_b=%f\n", height_b); printf("overlap_height=%f\n", overlap_height); printf("volume_a=%f\n", volume_a); printf("volume_b=%f\n", volume_b); printf("overlap_2d=%f\n", overlap_2d); printf("volume_overlap=%f\n", volume_overlap); printf("overlap result=%f\n", result); #endif return result; } __device__ inline float iou_normal(float const * const a, float const * const b) { float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); float width = 
fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = (a[2] - a[0]) * (a[3] - a[1]); float Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_kernel_3d(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask, bool normal_iou) { //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 10]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 10 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 0]; block_boxes[threadIdx.x * 10 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 1]; block_boxes[threadIdx.x * 10 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 2]; block_boxes[threadIdx.x * 10 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 3]; block_boxes[threadIdx.x * 10 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 4]; block_boxes[threadIdx.x * 10 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 5]; block_boxes[threadIdx.x * 10 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 6]; block_boxes[threadIdx.x * 10 + 7] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 7]; block_boxes[threadIdx.x * 10 + 8] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 8]; block_boxes[threadIdx.x * 10 + 9] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 9]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 10; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { float iou_st; if(normal_iou){ iou_st = iou_normal(cur_box, block_boxes + i * 10); } else{ iou_st = iou_bev(cur_box, block_boxes + i * 10); } if (iou_st > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __global__ void prepare_output_kernel_3d(const int N, const int max_keep, const int col_blocks, unsigned long long *mask, unsigned long long * remv_cpu, int* keep_idx, const float *boxes, float *bbox_after_nms) { // unsigned long long remv_cpu[col_blocks]; // memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); int num_to_keep = 0; for (int i = 0; i < N; i++) { if(num_to_keep >= max_keep) {break;} int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; // if (!(remv_cpu[nblock] & (1ULL << inblock))) { // keep_idx[num_to_keep++] = i; // unsigned long long *p = &mask[0] + i * col_blocks; // for (int j = nblock; j < col_blocks; j++) { // remv_cpu[j] |= p[j]; // } // } if (!(remv_cpu[nblock] & (1ULL << inblock))) { for (int k = 0; k < 10; k++){ bbox_after_nms[num_to_keep * 10 + k] = boxes[i * 10 + k]; } keep_idx[num_to_keep++] = i; unsigned long long *p = &mask[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } } template <> void NMS3DForward<gpu>(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const 
std::vector<TBlob>& in_data, const std::vector<OpReqType>& req, const std::vector<TBlob>& out_data) { using namespace mshadow; size_t expected_in = 1; size_t expected_out = 2; // input: boxes(B,N,10), which is sorted with score // output: keep_idx(B,num_boxes) CHECK_EQ(in_data.size(), expected_in); CHECK_EQ(out_data.size(), expected_out); CHECK_EQ(in_data[0].shape_[2], 10); CHECK_EQ(out_data[0].shape_[0], in_data[0].shape_[0]); const NMS3DParam param = nnvm::get<NMS3DParam>(attrs.parsed); const int B = in_data[0].size(0); const int N = in_data[0].size(1); const int max_keep = param.max_keep; const float iou_thres = param.iou_thres; const bool normal_iou = param.normal_iou; CHECK_EQ(out_data[0].shape_[1], max_keep); Stream<gpu>* s = ctx.get_stream<gpu>(); auto stream = mshadow::Stream<gpu>::GetStream(s); // assume all the data and gradient have the same type MSHADOW_TYPE_SWITCH(in_data[0].type_flag_, DType, { const float* boxes = in_data[0].dptr<float>(); int* keep_idx = out_data[0].dptr<int>(); float* bbox_after_nms = out_data[1].dptr<float>(); Fill<true>(s, out_data[0], kWriteTo, -1); Fill<true>(s, out_data[1], kWriteTo, 0.0); const int col_blocks = DIVUP(N, THREADS_PER_BLOCK_NMS); unsigned long long *mask_data = NULL; CHECK_ERROR(cudaMalloc((void**)&mask_data, B * N * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(N, THREADS_PER_BLOCK_NMS), DIVUP(N, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); unsigned long long *remv_dev = NULL; CHECK_ERROR(cudaMalloc((void**)&remv_dev, col_blocks * sizeof(unsigned long long))); // iterate through batch for(int b = 0; b < B; b++) { // calculate overlap matrix nms_kernel_3d<<<blocks, threads>>>(N, iou_thres, boxes+b*N*10, mask_data+b*N*col_blocks, normal_iou); CHECK_ERROR(cudaMemset(remv_dev, 0, col_blocks * sizeof(unsigned long long))); prepare_output_kernel_3d<<<1,1,0,stream>>>(N, max_keep, col_blocks, mask_data+b*N*col_blocks, remv_dev, keep_idx + b * max_keep, boxes + b * N * 10, bbox_after_nms + b * max_keep * 10); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { LOG(FATAL) << "CUDA kernel failed : " << cudaGetErrorString(err); exit(-1); } } /* release both device scratch buffers allocated above */ cudaFree(mask_data); cudaFree(remv_dev); }); } NNVM_REGISTER_OP(_contrib_NMS3D) .set_attr<FCompute>("FCompute<gpu>", NMS3DForward<gpu>); } }
ded97137f37b04d640d9190798ca684ee78e9dd8.hip
// !!! This is a file automatically generated by hipify!!! #include <cfloat> #include <chrono> #include <hip/hip_runtime_api.h> #include <iostream> using namespace std; /////////////////////////////////////////////////////////////////////////////////////////////////////////// hipError_t SAFE_CALL (hipError_t result) { if(result != hipSuccess) { printf("CUDA error: %s at call #CallInstruction\n", hipGetErrorString(result)); throw "error in CUDA API function, aborting..."; } return result; } hipError_t SAFE_KERNEL_CALL (hipError_t result) { if(result != hipSuccess) { printf("CUDA error in kernel launch: %s at kernel #KernelCallInstruction\n", hipGetErrorString(result)); throw "error in CUDA kernel launch, aborting..."; } result = hipDeviceSynchronize(); if(result != hipSuccess) { printf("CUDA error in kernel execution: %s at kernel \"#KernelCallInstruction\"\n", hipGetErrorString(result)); throw "error in CUDA kernel execution, aborting..."; } return result; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////// void __global__ gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) { const long long src_id = blockIdx.x * blockDim.x + threadIdx.x; if (src_id < vertices_count) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for(register int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////// // void __global__ gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) // { // const long long src_id = blockIdx.x * blockDim.x + threadIdx.x; // if (src_id < vertices_count) // { // const int first_edge_ptr = ptrs[src_id]; // const int connections_count = connections[src_id]; // //connections_count = ptrs[src_id + 1] - ptrs[src_id]; // for(register int cur_edge = 0; cur_edge < connections_count; cur_edge++) // { // // first_edge_ptr + cur_edge - index of the current edge in the arrays // int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; // int val = data[dst_id]; // result[first_edge_ptr + cur_edge] = val; // // this program can easily be turned into many graph algorithms, for example: // /* BFS // int src_level = data[src_id]; // int dst_level = data[dst_id]; // if((src_level == current_level) && (dst_level == UNVISITED_VERTEX)) // { // data[dst_id] = current_level + 1; // } // */ // /* SSSP // float weight = outgoing_weights[first_edge_ptr + cur_edge]; // float src_weight = data[src_id]; // float dst_weight = data[dst_id]; // if(dst_weight > src_weight + weight) // { // data[dst_id] = src_weight + weight; // } // */ // } // } // } int main() { int vertices_count = 1024*1024; int *ptrs = new int[vertices_count]; int *data = new int[vertices_count]; int *connections = new int[vertices_count]; int pos = 0; for(int i = 0; i < vertices_count; i++) // TODO (bonus) a graph with several "large" vertices { ptrs[i] = pos; connections[i] = 16 + rand()%32; pos += connections[i]; data[i] = rand(); } int edges_count = pos; int *outgoing_ids = new int[edges_count]; int *result = new int[edges_count]; for(int i = 0; i < edges_count; i++) { outgoing_ids[i] = rand()%vertices_count; } int *dev_ptrs; int *dev_connections; int *dev_outgoing_ids; int *dev_data; int *dev_result; hipMalloc((void**)&dev_ptrs, vertices_count*sizeof(int)); hipMalloc((void**)&dev_connections, vertices_count*sizeof(int)); hipMalloc((void**)&dev_data, 
vertices_count*sizeof(int)); hipMalloc((void**)&dev_outgoing_ids, edges_count*sizeof(int)); hipMalloc((void**)&dev_result, edges_count*sizeof(int)); SAFE_CALL( hipMemcpy(dev_ptrs, ptrs, vertices_count * sizeof(int), hipMemcpyHostToDevice) ); SAFE_CALL( hipMemcpy(dev_connections, connections, vertices_count * sizeof(int), hipMemcpyHostToDevice) ); SAFE_CALL( hipMemcpy(dev_data, data, vertices_count * sizeof(int), hipMemcpyHostToDevice) ); SAFE_CALL( hipMemcpy(dev_outgoing_ids, outgoing_ids, edges_count * sizeof(int), hipMemcpyHostToDevice) ); dim3 compute_threads(1024); dim3 compute_blocks( (vertices_count - 1) / compute_threads.x + 1); for (int i = 0; i < 5; i++) { auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( gather), dim3(compute_blocks), dim3(compute_threads), 0, 0, dev_ptrs, dev_connections, dev_outgoing_ids, vertices_count, dev_data, dev_result); auto end = std::chrono::steady_clock::now(); // TODO why does this timing measurement work? std::chrono::duration<double> elapsed_seconds = end-start; cout << "time: " << (elapsed_seconds.count())*1000.0 << " ms" << endl; cout << "bandwidth: " << 3.0*sizeof(int)*edges_count/((elapsed_seconds.count())*1e9) << " GB/s" << endl << endl; } int *copy_device_result = new int[edges_count]; // TODO copy SAFE_CALL(hipMemcpy(copy_device_result, dev_result, edges_count * sizeof(int), hipMemcpyDeviceToHost)); for (int src_id = 0; src_id < vertices_count; src_id++) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for (register int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } // TODO check int errors_count = 0; for (int i = 0; i < edges_count; i++) { if (result[i] != copy_device_result[i]) errors_count++; } cout << errors_count << endl; // TODO what are the 3 drawbacks of the current kernel version? // TODO profile the current version and draw conclusions about its performance // TODO implement an optimized version of the kernel // TODO (bonus) implement a basic version of the BFS algorithm (define the data structures and implement the algorithm itself) hipFree(dev_data); hipFree(dev_ptrs); hipFree(dev_connections); hipFree(dev_result); hipFree(dev_outgoing_ids); delete[]result; delete[]data; delete[]ptrs; delete[]outgoing_ids; delete[]connections; return 0; }
ded97137f37b04d640d9190798ca684ee78e9dd8.cu
#include <cfloat> #include <chrono> #include <cuda_profiler_api.h> #include <iostream> using namespace std; /////////////////////////////////////////////////////////////////////////////////////////////////////////// cudaError_t SAFE_CALL (cudaError_t result) { if(result != cudaSuccess) { printf("CUDA error: %s at call #CallInstruction\n", cudaGetErrorString(result)); throw "error in CUDA API function, aborting..."; } return result; } cudaError_t SAFE_KERNEL_CALL (cudaError_t result) { if(result != cudaSuccess) { printf("CUDA error in kernel launch: %s at kernel #KernelCallInstruction\n", cudaGetErrorString(result)); throw "error in CUDA kernel launch, aborting..."; } result = cudaDeviceSynchronize(); if(result != cudaSuccess) { printf("CUDA error in kernel execution: %s at kernel \"#KernelCallInstruction\"\n", cudaGetErrorString(result)); throw "error in CUDA kernel execution, aborting..."; } return result; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////// void __global__ gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) { const long long src_id = blockIdx.x * blockDim.x + threadIdx.x; if (src_id < vertices_count) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for(register int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////// // void __global__ gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) // { // const long long src_id = blockIdx.x * blockDim.x + threadIdx.x; // if (src_id < vertices_count) // { // const int first_edge_ptr = ptrs[src_id]; // const int connections_count = connections[src_id]; // //connections_count = ptrs[src_id + 1] - ptrs[src_id]; // for(register int cur_edge = 0; cur_edge < connections_count; cur_edge++) // { // // first_edge_ptr + cur_edge - index of the current edge in the arrays // int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; // int val = data[dst_id]; // result[first_edge_ptr + cur_edge] = val; // // this program can easily be turned into many graph algorithms, for example: // /* BFS // int src_level = data[src_id]; // int dst_level = data[dst_id]; // if((src_level == current_level) && (dst_level == UNVISITED_VERTEX)) // { // data[dst_id] = current_level + 1; // } // */ // /* SSSP // float weight = outgoing_weights[first_edge_ptr + cur_edge]; // float src_weight = data[src_id]; // float dst_weight = data[dst_id]; // if(dst_weight > src_weight + weight) // { // data[dst_id] = src_weight + weight; // } // */ // } // } // } int main() { int vertices_count = 1024*1024; int *ptrs = new int[vertices_count]; int *data = new int[vertices_count]; int *connections = new int[vertices_count]; int pos = 0; for(int i = 0; i < vertices_count; i++) // TODO (bonus) a graph with several "large" vertices { ptrs[i] = pos; connections[i] = 16 + rand()%32; pos += connections[i]; data[i] = rand(); } int edges_count = pos; int *outgoing_ids = new int[edges_count]; int *result = new int[edges_count]; for(int i = 0; i < edges_count; i++) { outgoing_ids[i] = rand()%vertices_count; } int *dev_ptrs; int *dev_connections; int *dev_outgoing_ids; int *dev_data; int *dev_result; cudaMalloc((void**)&dev_ptrs, vertices_count*sizeof(int)); 
cudaMalloc((void**)&dev_connections, vertices_count*sizeof(int)); cudaMalloc((void**)&dev_data, vertices_count*sizeof(int)); cudaMalloc((void**)&dev_outgoing_ids, edges_count*sizeof(int)); cudaMalloc((void**)&dev_result, edges_count*sizeof(int)); SAFE_CALL( cudaMemcpy(dev_ptrs, ptrs, vertices_count * sizeof(int), cudaMemcpyHostToDevice) ); SAFE_CALL( cudaMemcpy(dev_connections, connections, vertices_count * sizeof(int), cudaMemcpyHostToDevice) ); SAFE_CALL( cudaMemcpy(dev_data, data, vertices_count * sizeof(int), cudaMemcpyHostToDevice) ); SAFE_CALL( cudaMemcpy(dev_outgoing_ids, outgoing_ids, edges_count * sizeof(int), cudaMemcpyHostToDevice) ); dim3 compute_threads(1024); dim3 compute_blocks( (vertices_count - 1) / compute_threads.x + 1); for (int i = 0; i < 5; i++) { auto start = std::chrono::steady_clock::now(); gather<<<compute_blocks, compute_threads>>>(dev_ptrs, dev_connections, dev_outgoing_ids, vertices_count, dev_data, dev_result); auto end = std::chrono::steady_clock::now(); // TODO why does this timing measurement work? std::chrono::duration<double> elapsed_seconds = end-start; cout << "time: " << (elapsed_seconds.count())*1000.0 << " ms" << endl; cout << "bandwidth: " << 3.0*sizeof(int)*edges_count/((elapsed_seconds.count())*1e9) << " GB/s" << endl << endl; } int *copy_device_result = new int[edges_count]; // TODO copy SAFE_CALL(cudaMemcpy(copy_device_result, dev_result, edges_count * sizeof(int), cudaMemcpyDeviceToHost)); for (int src_id = 0; src_id < vertices_count; src_id++) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for (register int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } // TODO check int errors_count = 0; for (int i = 0; i < edges_count; i++) { if (result[i] != copy_device_result[i]) errors_count++; } cout << errors_count << endl; // TODO what are the 3 drawbacks of the current kernel version? // TODO profile the current version and draw conclusions about its performance // TODO implement an optimized version of the kernel // TODO (bonus) implement a basic version of the BFS algorithm (define the data structures and implement the algorithm itself) cudaFree(dev_data); cudaFree(dev_ptrs); cudaFree(dev_connections); cudaFree(dev_result); cudaFree(dev_outgoing_ids); delete[]result; delete[]data; delete[]ptrs; delete[]outgoing_ids; delete[]connections; return 0; }
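The timing TODO in the two gather listings above hinges on the launch being asynchronous: std::chrono around the bare launch mostly measures launch overhead rather than kernel execution. A minimal sketch of event-based timing, not part of the original exercise, reusing the dev_* buffers and launch configuration from the listing (the event variable names are illustrative):

cudaEvent_t start_evt, stop_evt;
cudaEventCreate(&start_evt);
cudaEventCreate(&stop_evt);
cudaEventRecord(start_evt);
gather<<<compute_blocks, compute_threads>>>(dev_ptrs, dev_connections, dev_outgoing_ids,
                                            vertices_count, dev_data, dev_result);
cudaEventRecord(stop_evt);
cudaEventSynchronize(stop_evt);   /* block until the kernel has actually finished */
float kernel_ms = 0.0f;
cudaEventElapsedTime(&kernel_ms, start_evt, stop_evt);
cout << "kernel time: " << kernel_ms << " ms" << endl;
cudaEventDestroy(start_evt);
cudaEventDestroy(stop_evt);

Calling cudaDeviceSynchronize() (or hipDeviceSynchronize() in the hipified version) before taking the second std::chrono timestamp gives a comparable wall-clock figure.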
1b4adf9219f446bd30bd42fc15deb950e482aba4.hip
// !!! This is a file automatically generated by hipify!!! // 20181010 // Yuqiong Li // Matrix multiplication with CUDA #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> #include <stdio.h> #define index(i, j, n) ((i) * (n) + (j)) // declare global kernel function __global__ void matrixMulKernel(float * a, float * b, float * c, unsigned int m, unsigned int n, unsigned int r); int main(){ unsigned int m = 2000, n = 2000, r = 1000; // dimensions float * a, * b, * c, *temp ; // declare matrices a = (float *) malloc(m * n * sizeof(float)); // a is m by n b = (float *) malloc(n * r * sizeof(float)); // b is n by r c = (float *) calloc(m * r, sizeof(float)); // c is m by r : the result matrix temp = (float *) calloc(m * r, sizeof(float)); // to store GPU results int i = 0, j = 0; // initializing a for (i = 0; i < m; i++){ for (j = 0; j < n; j++) a[index(i, j, n)] = i + j; } // initializing b for (i = 0; i < n; i++){ for (j = 0; j < r; j++) b[index(i, j, r)] = i + j + 1; } double time_taken; clock_t start, end; // CPU version start = clock(); int k = 0; for (i = 0; i < m; i++){ for (j = 0; j < r; j++){ for (k = 0; k < n; k++) c[index(i, j, r)] += a[index(i, k, n)] * b[index(k, j, r)]; } } end = clock(); time_taken = (double) (end - start) / CLOCKS_PER_SEC; printf("Time taken for CPU is %.2f.\n", time_taken); float val = 0.0; for (i = 0; i < m; i++){ for (j = 0; j < r; j++){ val += c[index(i, j, r)]; } } printf("Check value for CPU: sum is %.2f.\n", val); // 1. allocate device memory for cuda variables float * d_a, * d_b, * d_c; hipMalloc((void **) &d_a, m * n * sizeof(float)); hipMalloc((void **) &d_b, n * r * sizeof(float)); hipMalloc((void **) &d_c, m * r * sizeof(float)); // copy memory to device hipMemcpy(d_a, a, m * n * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, b, n * r * sizeof(float), hipMemcpyHostToDevice); // 2. invoke kernel function /* grid x must cover the r columns and grid y the m rows, since the kernel derives col from blockIdx.x and row from blockIdx.y */ dim3 blocksPerGrid(ceil(r/16.0), ceil(m/16.0), 1); dim3 threadsPerBlock(16, 16, 1); start = clock(); hipLaunchKernelGGL(( matrixMulKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c, m, n, r); hipDeviceSynchronize(); /* the launch is asynchronous; wait for completion so the timer measures execution */ end = clock(); time_taken = (double) (end - start)/ CLOCKS_PER_SEC; printf("Time taken for GPU is %.2f\n", time_taken); // 3. copy results back to the host hipMemcpy(temp, d_c, m * r * sizeof(float), hipMemcpyDeviceToHost); val = 0; for (i = 0; i < m; i++){ for (j = 0; j < r; j++){ val += temp[index(i, j, r)]; } } printf("Check value for GPU: sum is %.2f\n", val); free(a); free(b); free(c); free(temp); hipFree(d_c); hipFree(d_a); hipFree(d_b); return 0; } __global__ void matrixMulKernel(float * a, float * b, float * c, unsigned int m, unsigned int n, unsigned int r){ // a function to perform matrix multiplication // a is m by n; b is n by r; c is the result m by r unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; if ((row < m) && (col < r)){ float pvalue = 0; int k = 0; for (k = 0; k < n; k++){ pvalue += a[index(row, k, n)] * b[index(k, col, r)]; } c[index(row, col, r)] = pvalue; } }
1b4adf9219f446bd30bd42fc15deb950e482aba4.cu
// 20181010 // Yuqiong Li // Matrix multiplication with CUDA #include <stdlib.h> #include <cuda.h> #include <time.h> #include <stdio.h> #define index(i, j, n) ((i) * (n) + (j)) // declare global kernel function __global__ void matrixMulKernel(float * a, float * b, float * c, unsigned int m, unsigned int n, unsigned int r); int main(){ unsigned int m = 2000, n = 2000, r = 1000; // dimensions float * a, * b, * c, *temp ; // declare matrices a = (float *) malloc(m * n * sizeof(float)); // a is m by n b = (float *) malloc(n * r * sizeof(float)); // b is n by r c = (float *) calloc(m * r, sizeof(float)); // c is m by r : the result matrix temp = (float *) calloc(m * r, sizeof(float)); // to store GPU results int i = 0, j = 0; // initializing a for (i = 0; i < m; i++){ for (j = 0; j < n; j++) a[index(i, j, n)] = i + j; } // initializing b for (i = 0; i < n; i++){ for (j = 0; j < r; j++) b[index(i, j, r)] = i + j + 1; } double time_taken; clock_t start, end; // CPU version start = clock(); int k = 0; for (i = 0; i < m; i++){ for (j = 0; j < r; j++){ for (k = 0; k < n; k++) c[index(i, j, r)] += a[index(i, k, n)] * b[index(k, j, r)]; } } end = clock(); time_taken = (double) (end - start) / CLOCKS_PER_SEC; printf("Time taken for CPU is %.2f.\n", time_taken); float val = 0.0; for (i = 0; i < m; i++){ for (j = 0; j < r; j++){ val += c[index(i, j, r)]; } } printf("Check value for CPU: sum is %.2f.\n", val); // 1. allocate device memory for cuda variables float * d_a, * d_b, * d_c; cudaMalloc((void **) &d_a, m * n * sizeof(float)); cudaMalloc((void **) &d_b, n * r * sizeof(float)); cudaMalloc((void **) &d_c, m * r * sizeof(float)); // copy memory to device cudaMemcpy(d_a, a, m * n * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, n * r * sizeof(float), cudaMemcpyHostToDevice); // 2. invoke kernel function /* grid x must cover the r columns and grid y the m rows, since the kernel derives col from blockIdx.x and row from blockIdx.y */ dim3 blocksPerGrid(ceil(r/16.0), ceil(m/16.0), 1); dim3 threadsPerBlock(16, 16, 1); start = clock(); matrixMulKernel<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, m, n, r); cudaDeviceSynchronize(); /* the launch is asynchronous; wait for completion so the timer measures execution */ end = clock(); time_taken = (double) (end - start)/ CLOCKS_PER_SEC; printf("Time taken for GPU is %.2f\n", time_taken); // 3. copy results back to the host cudaMemcpy(temp, d_c, m * r * sizeof(float), cudaMemcpyDeviceToHost); val = 0; for (i = 0; i < m; i++){ for (j = 0; j < r; j++){ val += temp[index(i, j, r)]; } } printf("Check value for GPU: sum is %.2f\n", val); free(a); free(b); free(c); free(temp); cudaFree(d_c); cudaFree(d_a); cudaFree(d_b); return 0; } __global__ void matrixMulKernel(float * a, float * b, float * c, unsigned int m, unsigned int n, unsigned int r){ // a function to perform matrix multiplication // a is m by n; b is n by r; c is the result m by r unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; if ((row < m) && (col < r)){ float pvalue = 0; int k = 0; for (k = 0; k < n; k++){ pvalue += a[index(row, k, n)] * b[index(k, col, r)]; } c[index(row, col, r)] = pvalue; } }
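The kernel above reads every element of a and b straight from global memory. A common follow-up is a shared-memory tiled variant; the sketch below is not part of the original file, introduces the illustrative name matrixMulTiledKernel, reuses the file's row-major index() macro, and would be launched with the same 16x16 thread blocks and the column/row grid shown above:

#define TILE 16
__global__ void matrixMulTiledKernel(const float *a, const float *b, float *c,
                                     unsigned int m, unsigned int n, unsigned int r){
    // Each block computes a TILE x TILE patch of c, staging matching tiles of a and b
    // through shared memory so each global element is loaded once per tile instead of once per thread.
    __shared__ float tile_a[TILE][TILE];
    __shared__ float tile_b[TILE][TILE];
    unsigned int row = blockIdx.y * TILE + threadIdx.y;
    unsigned int col = blockIdx.x * TILE + threadIdx.x;
    float pvalue = 0.0f;
    for (unsigned int t = 0; t < (n + TILE - 1) / TILE; t++){
        unsigned int a_col = t * TILE + threadIdx.x;
        unsigned int b_row = t * TILE + threadIdx.y;
        tile_a[threadIdx.y][threadIdx.x] = (row < m && a_col < n) ? a[index(row, a_col, n)] : 0.0f;
        tile_b[threadIdx.y][threadIdx.x] = (b_row < n && col < r) ? b[index(b_row, col, r)] : 0.0f;
        __syncthreads();                       // tile fully loaded before use
        for (unsigned int kk = 0; kk < TILE; kk++)
            pvalue += tile_a[threadIdx.y][kk] * tile_b[kk][threadIdx.x];
        __syncthreads();                       // done with this tile before overwriting it
    }
    if (row < m && col < r)
        c[index(row, col, r)] = pvalue;
}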
ce01cd1dbc27d1e333f0072dbd4c43801eeede3f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "maxout_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include <memory> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../maxout_layer.h" namespace nnforge { namespace cuda { template<typename position_type> __global__ void maxout_upd_kernel( float * __restrict output, position_type * __restrict max_feature_map_positions, const float * __restrict input, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; float max_val = input[input_offset]; int max_pos = 0; for(int i = 1; i < feature_map_subsampling_size; ++i) { input_offset += output_feature_map_count * neuron_count_per_feature_map; float new_val = input[input_offset]; if (new_val > max_val) { max_val = new_val; max_pos = i; } } int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; output[output_offset] = max_val; max_feature_map_positions[output_offset] = static_cast<position_type>(max_pos); } } __global__ void maxout_forward_only_upd_kernel( float * __restrict output, const float * __restrict input, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; float max_val = input[input_offset]; for(int i = 1; i < feature_map_subsampling_size; ++i) { input_offset += output_feature_map_count * neuron_count_per_feature_map; float new_val = input[input_offset]; max_val = max(new_val, max_val); } int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; output[output_offset] = max_val; } } template<typename position_type, bool add_update_to_destination> __global__ void maxout_backprop_upd_kernel( float * __restrict input_errors, const position_type * __restrict max_feature_map_positions, const float * __restrict output_errors, int neuron_count_per_feature_map, int input_feature_map_count, int 
output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; int max_feature_map = static_cast<int>(max_feature_map_positions[output_offset]); float output_error = output_errors[output_offset]; int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; for(int i = 0; i < feature_map_subsampling_size; ++i) { if (add_update_to_destination) input_errors[input_offset] += ((i == max_feature_map) ? output_error : 0.0F); else input_errors[input_offset] = ((i == max_feature_map) ? output_error : 0.0F); input_offset += output_feature_map_count * neuron_count_per_feature_map; } } } void maxout_layer_updater_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, output_configuration_specific.feature_map_count, entry_count); if (actions.find(layer_action(layer_action::backward_data, 0)) == actions.end()) hipLaunchKernelGGL(( maxout_forward_only_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_buffer, *input_buffers[0], output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); else if (feature_map_subsampling_size <= 256) hipLaunchKernelGGL(( maxout_upd_kernel<unsigned char>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_buffer, *temporary_per_entry_buffer, *input_buffers[0], output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); else hipLaunchKernelGGL(( maxout_upd_kernel<int>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_buffer, *temporary_per_entry_buffer, *input_buffers[0], output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } void maxout_layer_updater_cuda::enqueue_backward_data_propagation( hipStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& 
schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, output_configuration_specific.feature_map_count, entry_count); if (feature_map_subsampling_size <= 256) { if (add_update_to_destination) hipLaunchKernelGGL(( maxout_backprop_upd_kernel<unsigned char, true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *temporary_per_entry_buffer, *output_errors_buffer, output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); else hipLaunchKernelGGL(( maxout_backprop_upd_kernel<unsigned char, false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *temporary_per_entry_buffer, *output_errors_buffer, output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } else { if (add_update_to_destination) hipLaunchKernelGGL(( maxout_backprop_upd_kernel<int, true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *temporary_per_entry_buffer, *output_errors_buffer, output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); else hipLaunchKernelGGL(( maxout_backprop_upd_kernel<int, false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *temporary_per_entry_buffer, *output_errors_buffer, output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } } bool maxout_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const { return false; } bool maxout_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } bool maxout_layer_updater_cuda::is_backward_data_dependent_on_temporary_per_entry_buffer(unsigned int action_input_index) const { return true; } void maxout_layer_updater_cuda::updater_configured() { std::shared_ptr<const maxout_layer> layer_derived = std::dynamic_pointer_cast<const maxout_layer>(layer_schema); feature_map_subsampling_size = layer_derived->feature_map_subsampling_size; } size_t maxout_layer_updater_cuda::get_temporary_per_entry_buffer_size() const { size_t res = 0; if (actions.find(layer_action(layer_action::backward_data, 0)) != actions.end()) { if (feature_map_subsampling_size <= 256) return output_elem_count_per_entry * sizeof(unsigned char); else 
return output_elem_count_per_entry * sizeof(int); } return res; } } }
ce01cd1dbc27d1e333f0072dbd4c43801eeede3f.cu
/* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "maxout_layer_updater_cuda.h" #include <cuda_runtime.h> #include <memory> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../maxout_layer.h" namespace nnforge { namespace cuda { template<typename position_type> __global__ void maxout_upd_kernel( float * __restrict output, position_type * __restrict max_feature_map_positions, const float * __restrict input, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; float max_val = input[input_offset]; int max_pos = 0; for(int i = 1; i < feature_map_subsampling_size; ++i) { input_offset += output_feature_map_count * neuron_count_per_feature_map; float new_val = input[input_offset]; if (new_val > max_val) { max_val = new_val; max_pos = i; } } int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; output[output_offset] = max_val; max_feature_map_positions[output_offset] = static_cast<position_type>(max_pos); } } __global__ void maxout_forward_only_upd_kernel( float * __restrict output, const float * __restrict input, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; float max_val = input[input_offset]; for(int i = 1; i < feature_map_subsampling_size; ++i) { input_offset += output_feature_map_count * neuron_count_per_feature_map; float new_val = input[input_offset]; max_val = max(new_val, max_val); } int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; output[output_offset] = max_val; } } template<typename position_type, bool add_update_to_destination> __global__ void maxout_backprop_upd_kernel( float * __restrict input_errors, const position_type * __restrict max_feature_map_positions, const float * __restrict output_errors, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) 
{ int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; int max_feature_map = static_cast<int>(max_feature_map_positions[output_offset]); float output_error = output_errors[output_offset]; int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; for(int i = 0; i < feature_map_subsampling_size; ++i) { if (add_update_to_destination) input_errors[input_offset] += ((i == max_feature_map) ? output_error : 0.0F); else input_errors[input_offset] = ((i == max_feature_map) ? output_error : 0.0F); input_offset += output_feature_map_count * neuron_count_per_feature_map; } } } void maxout_layer_updater_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, output_configuration_specific.feature_map_count, entry_count); if (actions.find(layer_action(layer_action::backward_data, 0)) == actions.end()) maxout_forward_only_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_buffer, *input_buffers[0], output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); else if (feature_map_subsampling_size <= 256) maxout_upd_kernel<unsigned char><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_buffer, *temporary_per_entry_buffer, *input_buffers[0], output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); else maxout_upd_kernel<int><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_buffer, *temporary_per_entry_buffer, *input_buffers[0], output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } void maxout_layer_updater_cuda::enqueue_backward_data_propagation( cudaStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const 
std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, output_configuration_specific.feature_map_count, entry_count); if (feature_map_subsampling_size <= 256) { if (add_update_to_destination) maxout_backprop_upd_kernel<unsigned char, true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *temporary_per_entry_buffer, *output_errors_buffer, output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); else maxout_backprop_upd_kernel<unsigned char, false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *temporary_per_entry_buffer, *output_errors_buffer, output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } else { if (add_update_to_destination) maxout_backprop_upd_kernel<int, true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *temporary_per_entry_buffer, *output_errors_buffer, output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); else maxout_backprop_upd_kernel<int, false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *temporary_per_entry_buffer, *output_errors_buffer, output_elem_count_per_feature_map, input_configuration_specific_list[0].feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } } bool maxout_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const { return false; } bool maxout_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } bool maxout_layer_updater_cuda::is_backward_data_dependent_on_temporary_per_entry_buffer(unsigned int action_input_index) const { return true; } void maxout_layer_updater_cuda::updater_configured() { std::shared_ptr<const maxout_layer> layer_derived = std::dynamic_pointer_cast<const maxout_layer>(layer_schema); feature_map_subsampling_size = layer_derived->feature_map_subsampling_size; } size_t maxout_layer_updater_cuda::get_temporary_per_entry_buffer_size() const { size_t res = 0; if (actions.find(layer_action(layer_action::backward_data, 0)) != actions.end()) { if (feature_map_subsampling_size <= 256) return output_elem_count_per_entry * sizeof(unsigned char); else return output_elem_count_per_entry * sizeof(int); } return res; } } }
a2147415a4fb5beb1adbb0dcd4e7acd32b34f08b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <chrono> #include <iostream> #include <vector> #include <complex> #include "periodic_torsion.hpp" #include "gpu_utils.cuh" #include "k_bonded_deterministic.cuh" namespace timemachine { template <typename RealType, int D> PeriodicTorsion<RealType, D>::PeriodicTorsion( const std::vector<int> &torsion_idxs, // [A, 4] const std::vector<int> &param_idxs // [A, 3] ) : T_(torsion_idxs.size()/4) { if(torsion_idxs.size() % 4 != 0) { throw std::runtime_error("torsion_idxs.size() must be exactly 4*k"); } for(int a=0; a < T_; a++) { auto i = torsion_idxs[a*4+0]; auto j = torsion_idxs[a*4+1]; auto k = torsion_idxs[a*4+2]; auto l = torsion_idxs[a*4+3]; if(i == j || i == k || i == l || j == k || j == l || k == l) { throw std::runtime_error("torsion quads must be unique"); } } gpuErrchk(hipMalloc(&d_torsion_idxs_, T_*4*sizeof(*d_torsion_idxs_))); gpuErrchk(hipMemcpy(d_torsion_idxs_, &torsion_idxs[0], T_*4*sizeof(*d_torsion_idxs_), hipMemcpyHostToDevice)); gpuErrchk(hipMalloc(&d_param_idxs_, T_*3*sizeof(*d_param_idxs_))); gpuErrchk(hipMemcpy(d_param_idxs_, &param_idxs[0], T_*3*sizeof(*d_param_idxs_), hipMemcpyHostToDevice)); }; template <typename RealType, int D> PeriodicTorsion<RealType, D>::~PeriodicTorsion() { gpuErrchk(hipFree(d_torsion_idxs_)); gpuErrchk(hipFree(d_param_idxs_)); }; template <typename RealType, int D> void PeriodicTorsion<RealType, D>::execute_device( const int N, const int P, const double *d_coords, const double *d_coords_tangents, const double *d_params, unsigned long long *d_out_coords, double *d_out_coords_tangents, double *d_out_params_tangents, hipStream_t stream ) { int tpb = 32; int blocks = (T_+tpb-1)/tpb; auto start = std::chrono::high_resolution_clock::now(); if(d_coords_tangents == nullptr) { hipLaunchKernelGGL(( k_periodic_torsion_inference<RealType, D>), dim3(blocks), dim3(tpb), 0, stream, T_, d_coords, d_params, d_torsion_idxs_, d_param_idxs_, d_out_coords ); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); // auto finish = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> elapsed = finish - start; // std::cout << "PeriodicTorsion Elapsed time: " << elapsed.count() << " s\n"; } else { hipLaunchKernelGGL(( k_periodic_torsion_jvp<RealType, D>), dim3(blocks), dim3(tpb), 0, stream, T_, d_coords, d_coords_tangents, d_params, d_torsion_idxs_, d_param_idxs_, d_out_coords_tangents, d_out_params_tangents ); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); // auto finish = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> elapsed = finish - start; // std::cout << "PeriodicTorsion JVP Elapsed time: " << elapsed.count() << " s\n"; } }; template class PeriodicTorsion<double, 4>; template class PeriodicTorsion<double, 3>; template class PeriodicTorsion<float, 4>; template class PeriodicTorsion<float, 3>; } // namespace timemachine
a2147415a4fb5beb1adbb0dcd4e7acd32b34f08b.cu
#include <chrono> #include <iostream> #include <vector> #include <complex> #include "periodic_torsion.hpp" #include "gpu_utils.cuh" #include "k_bonded_deterministic.cuh" namespace timemachine { template <typename RealType, int D> PeriodicTorsion<RealType, D>::PeriodicTorsion( const std::vector<int> &torsion_idxs, // [A, 4] const std::vector<int> &param_idxs // [A, 3] ) : T_(torsion_idxs.size()/4) { if(torsion_idxs.size() % 4 != 0) { throw std::runtime_error("torsion_idxs.size() must be exactly 4*k"); } for(int a=0; a < T_; a++) { auto i = torsion_idxs[a*4+0]; auto j = torsion_idxs[a*4+1]; auto k = torsion_idxs[a*4+2]; auto l = torsion_idxs[a*4+3]; if(i == j || i == k || i == l || j == k || j == l || k == l) { throw std::runtime_error("torsion quads must be unique"); } } gpuErrchk(cudaMalloc(&d_torsion_idxs_, T_*4*sizeof(*d_torsion_idxs_))); gpuErrchk(cudaMemcpy(d_torsion_idxs_, &torsion_idxs[0], T_*4*sizeof(*d_torsion_idxs_), cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_param_idxs_, T_*3*sizeof(*d_param_idxs_))); gpuErrchk(cudaMemcpy(d_param_idxs_, &param_idxs[0], T_*3*sizeof(*d_param_idxs_), cudaMemcpyHostToDevice)); }; template <typename RealType, int D> PeriodicTorsion<RealType, D>::~PeriodicTorsion() { gpuErrchk(cudaFree(d_torsion_idxs_)); gpuErrchk(cudaFree(d_param_idxs_)); }; template <typename RealType, int D> void PeriodicTorsion<RealType, D>::execute_device( const int N, const int P, const double *d_coords, const double *d_coords_tangents, const double *d_params, unsigned long long *d_out_coords, double *d_out_coords_tangents, double *d_out_params_tangents, cudaStream_t stream ) { int tpb = 32; int blocks = (T_+tpb-1)/tpb; auto start = std::chrono::high_resolution_clock::now(); if(d_coords_tangents == nullptr) { k_periodic_torsion_inference<RealType, D><<<blocks, tpb, 0, stream>>>( T_, d_coords, d_params, d_torsion_idxs_, d_param_idxs_, d_out_coords ); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); // auto finish = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> elapsed = finish - start; // std::cout << "PeriodicTorsion Elapsed time: " << elapsed.count() << " s\n"; } else { k_periodic_torsion_jvp<RealType, D><<<blocks, tpb, 0, stream>>>( T_, d_coords, d_coords_tangents, d_params, d_torsion_idxs_, d_param_idxs_, d_out_coords_tangents, d_out_params_tangents ); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); // auto finish = std::chrono::high_resolution_clock::now(); // std::chrono::duration<double> elapsed = finish - start; // std::cout << "PeriodicTorsion JVP Elapsed time: " << elapsed.count() << " s\n"; } }; template class PeriodicTorsion<double, 4>; template class PeriodicTorsion<double, 3>; template class PeriodicTorsion<float, 4>; template class PeriodicTorsion<float, 3>; } // namespace timemachine
e979cdc2e1c6b4a7b3dfdffd2d265bbdf8e77545.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <iostream> #include "resize_bilinear_kernel.h" #include "common.h" using namespace nvinfer1; /// For checking if types are same template <typename T, typename U> struct is_same : std::false_type {}; template <typename T> __device__ struct is_same<T, T> : std::true_type {}; template <typename T, typename U> constexpr __device__ bool are_types_same() { return is_same<T, U>::value; } /// \brief CUDA kernel for calculating bilinear resizing of stacked images /// /// \see https://en.wikipedia.org/wiki/Bilinear_interpolation /// \see tensorflow/core/kernels/resize_bilinear_op_gpu.cu.cc /// /// In this implementation there is one thread per output pixel /// /// \tparam[in] T type of input tensors /// \tparam[in] align_corners If true, scaling is (in-1)/(out-i), otherwise /// in/out. True is normal computer vision / image processing bilinear /// resize, false is default for TensorFlow /// \param[in] width_in Width of the original image /// \param[in] height_in Height of the original image /// \param[in] in Stacked input images in NHW order /// \param[in] width_out Width of the resulting image /// \param[in] height_out Height of the resulting image /// \param[in] output_volume Volume of the output data /// (width_out*height_out*layers) /// \param[out] out Output data template <typename T> __global__ void KernelResizeBilinear(int const width_in, int const height_in, T const *in, int const width_out, int const height_out, uint32_t const output_volume, float const x_scale, float const y_scale, T *out) { // Calculate output pixel location uint32_t const idx_out = threadIdx.x + blockIdx.x * blockDim.x; // Make sure we do not over index mem if (idx_out > output_volume) { // This thread does not contribute return; } // These are whole integers, but we need them as floats float const out_x = static_cast<float>(idx_out % width_out); // Note that out_y is actually index in the whole block of images (across // layers) float const out_y = static_cast<float>(idx_out / width_out); // Y-index in 2D image float const in_y_img = (static_cast<int>(out_y) % height_out) * y_scale; // Input y is calculate little bit too complicated because of block shape // and thread indexing uint32_t const layer = out_y / height_out; // Y-index in the whole 'block' of images float const in_y = in_y_img + layer * height_in; // Calculate input pixel location (in floats) // Align corners is used tensorflows image_resizer_state.h to calculate the // scaling float const in_x = out_x * x_scale; // Convert back to integer. These indeces are the top- and left pixel // coordinates. Note use of floorf which is required to get same // results as tensorflow. Do not try to optimize it away uint32_t const in_xd0 = static_cast<uint32_t>(floorf(in_x)); uint32_t const in_yd0 = static_cast<uint32_t>(floorf(in_y)); // Calculate bottom and right pixel coordinates // For the last row and column we want to make sure that we do not overindex uint32_t const in_xd1 = (in_x < width_in - 1) ? ceilf(in_x) : width_in - 1; uint32_t const in_yd1 = (in_y_img < height_in - 1) ? ceilf(in_y) : height_in * (layer + 1) - 1; // Linear interpolation coefficients float const x_lerp = in_x - in_xd0; // 4 corner pixels // We could try to use __ldg if it improves performance // on some devices, but it seems to have same performance on RTX 2080. 
T const py0x0 = in[in_yd0 * width_in + in_xd0]; T const py1x0 = in[in_yd1 * width_in + in_xd0]; T const py0x1 = in[in_yd0 * width_in + in_xd1]; T const py1x1 = in[in_yd1 * width_in + in_xd1]; float const y_lerp = in_y - in_yd0; // Interpolate top and bottom in x-axis float const top = py0x0 + (static_cast<float>(py0x1) - static_cast<float>(py0x0)) * x_lerp; float const bot = py1x0 + (static_cast<float>(py1x1) - static_cast<float>(py1x0)) * x_lerp; // Finalize by y-axis interpolation if (are_types_same<int8_t, T>()) { out[idx_out] = fmaxf(fminf(roundf(top + (bot - top) * y_lerp), 127.0f), -128.0f); } else { out[idx_out] = top + (bot - top) * y_lerp; } } bool LauncherResizeBilinear(int const num_batches, Dims const in_dims, void const *data_in, Dims const size, DataType const type, bool const align_corners, hipStream_t stream, void *data_out) { assert(size.nbDims == 2); assert(in_dims.nbDims >= 2); // Calculate output tensor size Dims out_dims = in_dims; out_dims.d[out_dims.nbDims - 2] = size.d[0]; out_dims.d[out_dims.nbDims - 1] = size.d[1]; int const width_in = in_dims.d[in_dims.nbDims - 1]; int const height_in = in_dims.d[in_dims.nbDims - 2]; int const width_out = size.d[1]; int const height_out = size.d[0]; int const num_outputs = Volume(out_dims) * num_batches; // Kernel configuration int const threads_per_block = 128; int const blocks = (num_outputs + threads_per_block - 1) / threads_per_block; int const shared_mem = 0; // Calculate input pixel location (in floats) // Align corners is used tensorflows image_resizer_state.h to calculate the // scaling float const x_scale = align_corners ? (width_in - 1) / static_cast<float>(width_out - 1) : width_in / static_cast<float>(width_out); float const y_scale = align_corners ? (height_in - 1) / static_cast<float>(height_out - 1) : height_in / static_cast<float>(height_out); if (DataType::kFLOAT == type) { hipLaunchKernelGGL(( KernelResizeBilinear<float>) , dim3(blocks), dim3(threads_per_block), shared_mem, stream, width_in, height_in, static_cast<float const *>(data_in), width_out, height_out, num_outputs, x_scale, y_scale, static_cast<float *>(data_out)); } else if (DataType::kINT8 == type) { hipLaunchKernelGGL(( KernelResizeBilinear<int8_t>) , dim3(blocks), dim3(threads_per_block), shared_mem, stream, width_in, height_in, static_cast<int8_t const *>(data_in), width_out, height_out, num_outputs, x_scale, y_scale, static_cast<int8_t *>(data_out)); } else { std::cerr << "LauncherResizeBilinear:Unsupported data type" << std::endl; return false; } auto err = hipStreamSynchronize(stream); if (hipSuccess != err) { std::cerr << "LauncherResizeBilinear:Kernel launch failed: " << hipGetErrorName(err) << std::endl; return false; } return true; }
e979cdc2e1c6b4a7b3dfdffd2d265bbdf8e77545.cu
#include <cassert> #include <iostream> #include "resize_bilinear_kernel.h" #include "common.h" using namespace nvinfer1; /// For checking if types are same template <typename T, typename U> struct is_same : std::false_type {}; template <typename T> __device__ struct is_same<T, T> : std::true_type {}; template <typename T, typename U> constexpr __device__ bool are_types_same() { return is_same<T, U>::value; } /// \brief CUDA kernel for calculating bilinear resizing of stacked images /// /// \see https://en.wikipedia.org/wiki/Bilinear_interpolation /// \see tensorflow/core/kernels/resize_bilinear_op_gpu.cu.cc /// /// In this implementation there is one thread per output pixel /// /// \tparam[in] T type of input tensors /// \tparam[in] align_corners If true, scaling is (in-1)/(out-i), otherwise /// in/out. True is normal computer vision / image processing bilinear /// resize, false is default for TensorFlow /// \param[in] width_in Width of the original image /// \param[in] height_in Height of the original image /// \param[in] in Stacked input images in NHW order /// \param[in] width_out Width of the resulting image /// \param[in] height_out Height of the resulting image /// \param[in] output_volume Volume of the output data /// (width_out*height_out*layers) /// \param[out] out Output data template <typename T> __global__ void KernelResizeBilinear(int const width_in, int const height_in, T const *in, int const width_out, int const height_out, uint32_t const output_volume, float const x_scale, float const y_scale, T *out) { // Calculate output pixel location uint32_t const idx_out = threadIdx.x + blockIdx.x * blockDim.x; // Make sure we do not over index mem if (idx_out > output_volume) { // This thread does not contribute return; } // These are whole integers, but we need them as floats float const out_x = static_cast<float>(idx_out % width_out); // Note that out_y is actually index in the whole block of images (across // layers) float const out_y = static_cast<float>(idx_out / width_out); // Y-index in 2D image float const in_y_img = (static_cast<int>(out_y) % height_out) * y_scale; // Input y is calculate little bit too complicated because of block shape // and thread indexing uint32_t const layer = out_y / height_out; // Y-index in the whole 'block' of images float const in_y = in_y_img + layer * height_in; // Calculate input pixel location (in floats) // Align corners is used tensorflows image_resizer_state.h to calculate the // scaling float const in_x = out_x * x_scale; // Convert back to integer. These indeces are the top- and left pixel // coordinates. Note use of floorf which is required to get same // results as tensorflow. Do not try to optimize it away uint32_t const in_xd0 = static_cast<uint32_t>(floorf(in_x)); uint32_t const in_yd0 = static_cast<uint32_t>(floorf(in_y)); // Calculate bottom and right pixel coordinates // For the last row and column we want to make sure that we do not overindex uint32_t const in_xd1 = (in_x < width_in - 1) ? ceilf(in_x) : width_in - 1; uint32_t const in_yd1 = (in_y_img < height_in - 1) ? ceilf(in_y) : height_in * (layer + 1) - 1; // Linear interpolation coefficients float const x_lerp = in_x - in_xd0; // 4 corner pixels // We could try to use __ldg if it improves performance // on some devices, but it seems to have same performance on RTX 2080. 
T const py0x0 = in[in_yd0 * width_in + in_xd0]; T const py1x0 = in[in_yd1 * width_in + in_xd0]; T const py0x1 = in[in_yd0 * width_in + in_xd1]; T const py1x1 = in[in_yd1 * width_in + in_xd1]; float const y_lerp = in_y - in_yd0; // Interpolate top and bottom in x-axis float const top = py0x0 + (static_cast<float>(py0x1) - static_cast<float>(py0x0)) * x_lerp; float const bot = py1x0 + (static_cast<float>(py1x1) - static_cast<float>(py1x0)) * x_lerp; // Finalize by y-axis interpolation if (are_types_same<int8_t, T>()) { out[idx_out] = fmaxf(fminf(roundf(top + (bot - top) * y_lerp), 127.0f), -128.0f); } else { out[idx_out] = top + (bot - top) * y_lerp; } } bool LauncherResizeBilinear(int const num_batches, Dims const in_dims, void const *data_in, Dims const size, DataType const type, bool const align_corners, cudaStream_t stream, void *data_out) { assert(size.nbDims == 2); assert(in_dims.nbDims >= 2); // Calculate output tensor size Dims out_dims = in_dims; out_dims.d[out_dims.nbDims - 2] = size.d[0]; out_dims.d[out_dims.nbDims - 1] = size.d[1]; int const width_in = in_dims.d[in_dims.nbDims - 1]; int const height_in = in_dims.d[in_dims.nbDims - 2]; int const width_out = size.d[1]; int const height_out = size.d[0]; int const num_outputs = Volume(out_dims) * num_batches; // Kernel configuration int const threads_per_block = 128; int const blocks = (num_outputs + threads_per_block - 1) / threads_per_block; int const shared_mem = 0; // Calculate input pixel location (in floats) // Align corners is used tensorflows image_resizer_state.h to calculate the // scaling float const x_scale = align_corners ? (width_in - 1) / static_cast<float>(width_out - 1) : width_in / static_cast<float>(width_out); float const y_scale = align_corners ? (height_in - 1) / static_cast<float>(height_out - 1) : height_in / static_cast<float>(height_out); if (DataType::kFLOAT == type) { KernelResizeBilinear<float> <<<blocks, threads_per_block, shared_mem, stream>>>( width_in, height_in, static_cast<float const *>(data_in), width_out, height_out, num_outputs, x_scale, y_scale, static_cast<float *>(data_out)); } else if (DataType::kINT8 == type) { KernelResizeBilinear<int8_t> <<<blocks, threads_per_block, shared_mem, stream>>>( width_in, height_in, static_cast<int8_t const *>(data_in), width_out, height_out, num_outputs, x_scale, y_scale, static_cast<int8_t *>(data_out)); } else { std::cerr << "LauncherResizeBilinear:Unsupported data type" << std::endl; return false; } auto err = cudaStreamSynchronize(stream); if (cudaSuccess != err) { std::cerr << "LauncherResizeBilinear:Kernel launch failed: " << cudaGetErrorName(err) << std::endl; return false; } return true; }
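A minimal host-side sketch of how the launcher above can be invoked. This is not part of the original file: ResizeExample, d_in and d_out are illustrative names, the device buffers are assumed to be already allocated and sized, and resize_bilinear_kernel.h is assumed to declare LauncherResizeBilinear as defined above. With align_corners=false the scale is in/out (the TensorFlow default); with align_corners=true it is (in-1)/(out-1).

// Host-side usage sketch (assumptions noted above).
#include <cuda_runtime.h>
#include "resize_bilinear_kernel.h"

bool ResizeExample(const float *d_in, float *d_out, cudaStream_t stream) {
    nvinfer1::Dims in_dims;   // a single 4x6 image (num_batches handles stacking)
    in_dims.nbDims = 2;
    in_dims.d[0] = 4;         // height_in
    in_dims.d[1] = 6;         // width_in

    nvinfer1::Dims size;      // requested output height/width
    size.nbDims = 2;
    size.d[0] = 2;            // height_out
    size.d[1] = 3;            // width_out

    // align_corners=false -> scale = in/out; align_corners=true -> (in-1)/(out-1)
    return LauncherResizeBilinear(/*num_batches=*/1, in_dims, d_in, size,
                                  nvinfer1::DataType::kFLOAT,
                                  /*align_corners=*/false, stream, d_out);
}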
8a411296999d8086b5019e8589a3f9a8ce1e763f.hip
// !!! This is a file automatically generated by hipify!!! /////////////////////////////////////////////////////////////////////////////// // cuda_proc.cu // // Contains definitions of functions related to CUDA image processor module /////////////////////////////////////////////////////////////////////////////// #include "cuda_proc.cuh" #include <cstdio> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include "cuda_common.cuh" #include "cuda_filter.cuh" #include "cuda_hist.cuh" #include "cuda_image.cuh" #include "cuda_kernel.cuh" #include "cuda_median.cuh" #include "log.hpp" // // Private members // CudaImage g_img_a; CudaImage g_img_b; // // Private functions // static void cuda_set_device(int device) { LOG_INFO("Setting CUDA device no. %d\n", device); checkCudaErrors(hipSetDevice(device)); } static void cuda_reset_device() { LOG_INFO("Resetting current CUDA device\n"); checkCudaErrors(hipDeviceReset()); } static void cuda_process_host_image_async( Image& h_dst, const Image& h_src, const Kernel& h_filter_kernel, size_t median_ksize) { assert(h_dst.cols == h_src.cols); assert(h_dst.rows == h_src.rows); const auto cols = h_src.cols; const auto rows = h_src.rows; const auto filter_ksize = h_filter_kernel.cols; // Form subimages from local images, suited to given size auto img_a = cuda_image_sub(g_img_a, cols, rows); auto img_b = cuda_image_sub(g_img_b, cols, rows); // Copy data from host asynchronously cuda_image_copy_from_host_async(img_a, h_src); cuda_filter_copy_kernel_from_host_async(h_filter_kernel); // Do right processing asynchronously cuda_median_async(img_b, img_a, median_ksize); cuda_filter_async_prep(img_b, filter_ksize); cuda_filter_async(img_a, img_b, filter_ksize); cuda_equalize_hist_async(img_b, img_a); // Copy data to host asynchronously cuda_image_copy_to_host_async(h_dst, img_b); } // // Public functions // void cuda_proc_init() { LOG_INFO("Initializing CUDA proc module\n"); // Initialize device cuda_set_device(0); // Initialize modules cuda_filter_init(); cuda_hist_init(); // Allocate local buffers g_img_a = cuda_create_image(ColsMax, RowsMax); g_img_b = cuda_create_image(ColsMax, RowsMax); } void cuda_proc_deinit() { LOG_INFO("Deinitializing CUDA proc module\n"); // Free local buffers cuda_free_image(g_img_b); cuda_free_image(g_img_a); // Deinitialize modules cuda_hist_deinit(); cuda_filter_deinit(); // Deinitialize device cuda_reset_device(); } Image cuda_process_host_image( const Image& h_src, const Kernel& h_kernel, size_t median_ksize) { const auto cols = h_src.cols; const auto rows = h_src.rows; LOG_INFO("Processing image with CUDA\n"); // Allocate temporary host buffer auto h_dst = cuda_create_host_image(cols, rows); // Page-lock host buffers cuda_host_image_register(h_src); cuda_host_image_register(h_dst); cuda_host_kernel_register(h_kernel); // Perform processing of host image cuda_process_host_image_async(h_dst, h_src, h_kernel, median_ksize); checkCudaErrors(hipDeviceSynchronize()); // Un-Page-lock host buffers cuda_host_kernel_unregister(h_kernel); cuda_host_image_unregister(h_dst); cuda_host_image_unregister(h_src); return h_dst; }
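Note that the hipified file above still routes error checking through checkCudaErrors() from helper_cuda.h. A minimal sketch of a HIP-native equivalent, under the assumption that helper_cuda.h is unavailable in a pure HIP build (HIP_CHECK is an illustrative name, not part of the original project):

// Sketch only: a HIP-native error-check wrapper comparable to checkCudaErrors().
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                                  \
    do {                                                                 \
        hipError_t err_ = (call);                                        \
        if (err_ != hipSuccess) {                                        \
            std::fprintf(stderr, "HIP error %s at %s:%d\n",              \
                         hipGetErrorString(err_), __FILE__, __LINE__);   \
            std::exit(EXIT_FAILURE);                                     \
        }                                                                \
    } while (0)

// Usage, mirroring cuda_set_device() above: HIP_CHECK(hipSetDevice(0));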
8a411296999d8086b5019e8589a3f9a8ce1e763f.cu
/////////////////////////////////////////////////////////////////////////////// // cuda_proc.cu // // Contains definitions of functions related to CUDA image processor module /////////////////////////////////////////////////////////////////////////////// #include "cuda_proc.cuh" #include <cstdio> #include <cuda_runtime.h> #include <helper_cuda.h> #include "cuda_common.cuh" #include "cuda_filter.cuh" #include "cuda_hist.cuh" #include "cuda_image.cuh" #include "cuda_kernel.cuh" #include "cuda_median.cuh" #include "log.hpp" // // Private members // CudaImage g_img_a; CudaImage g_img_b; // // Private functions // static void cuda_set_device(int device) { LOG_INFO("Setting CUDA device no. %d\n", device); checkCudaErrors(cudaSetDevice(device)); } static void cuda_reset_device() { LOG_INFO("Resetting current CUDA device\n"); checkCudaErrors(cudaDeviceReset()); } static void cuda_process_host_image_async( Image& h_dst, const Image& h_src, const Kernel& h_filter_kernel, size_t median_ksize) { assert(h_dst.cols == h_src.cols); assert(h_dst.rows == h_src.rows); const auto cols = h_src.cols; const auto rows = h_src.rows; const auto filter_ksize = h_filter_kernel.cols; // Form subimages from local images, suited to given size auto img_a = cuda_image_sub(g_img_a, cols, rows); auto img_b = cuda_image_sub(g_img_b, cols, rows); // Copy data from host asynchronously cuda_image_copy_from_host_async(img_a, h_src); cuda_filter_copy_kernel_from_host_async(h_filter_kernel); // Do right processing asynchronously cuda_median_async(img_b, img_a, median_ksize); cuda_filter_async_prep(img_b, filter_ksize); cuda_filter_async(img_a, img_b, filter_ksize); cuda_equalize_hist_async(img_b, img_a); // Copy data to host asynchronously cuda_image_copy_to_host_async(h_dst, img_b); } // // Public functions // void cuda_proc_init() { LOG_INFO("Initializing CUDA proc module\n"); // Initialize device cuda_set_device(0); // Initialize modules cuda_filter_init(); cuda_hist_init(); // Allocate local buffers g_img_a = cuda_create_image(ColsMax, RowsMax); g_img_b = cuda_create_image(ColsMax, RowsMax); } void cuda_proc_deinit() { LOG_INFO("Deinitializing CUDA proc module\n"); // Free local buffers cuda_free_image(g_img_b); cuda_free_image(g_img_a); // Deinitialize modules cuda_hist_deinit(); cuda_filter_deinit(); // Deinitialize device cuda_reset_device(); } Image cuda_process_host_image( const Image& h_src, const Kernel& h_kernel, size_t median_ksize) { const auto cols = h_src.cols; const auto rows = h_src.rows; LOG_INFO("Processing image with CUDA\n"); // Allocate temporary host buffer auto h_dst = cuda_create_host_image(cols, rows); // Page-lock host buffers cuda_host_image_register(h_src); cuda_host_image_register(h_dst); cuda_host_kernel_register(h_kernel); // Perform processing of host image cuda_process_host_image_async(h_dst, h_src, h_kernel, median_ksize); checkCudaErrors(cudaDeviceSynchronize()); // Un-Page-lock host buffers cuda_host_kernel_unregister(h_kernel); cuda_host_image_unregister(h_dst); cuda_host_image_unregister(h_src); return h_dst; }
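The "Page-lock host buffers" step above goes through project helpers (cuda_host_image_register and friends, defined elsewhere in the repo). A minimal sketch of what such a helper typically wraps, assuming a hypothetical contiguous rows*cols byte buffer rather than the project's Image type; PinHostBuffer/UnpinHostBuffer are illustrative names:

// Sketch only: page-locking an existing host allocation so the async copies
// used above (cuda_image_copy_*_async) can overlap with kernel execution.
#include <cuda_runtime.h>
#include <helper_cuda.h>   // checkCudaErrors, as used in the file above

void PinHostBuffer(unsigned char* data, size_t rows, size_t cols)
{
    checkCudaErrors(cudaHostRegister(data, rows * cols * sizeof(unsigned char),
                                     cudaHostRegisterDefault));
}

void UnpinHostBuffer(unsigned char* data)
{
    checkCudaErrors(cudaHostUnregister(data));
}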
e3ca6336eaf82125e92c68776749901507256658.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the Cuda OpenGL bindings to dynamically modify a vertex buffer using a Cuda kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with Cuda 3. Map the VBO for writing from Cuda 4. Run Cuda kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL Host code */ /* ChesPartGL.h and ChesPartGL.cu are included at top of mainpart.cu #include "ChesPartGL.h" */ #include <typeinfo> // MakeShader.cpp bool ReadFileNew(string pFileName, string& outFile); static void AddShader(GLuint ShaderProgram, const char* pShaderText, GLenum ShaderType); void CompileShaders(string vs, string fs); void transmatrix(float *matrix, float tx,float ty, float tz); // flag for pausing motion in cudamove bool runaction=true; bool getRunAction(){ return runaction; } // thread method to be developed for disk reading //std::thread t12 ; void UpDData(); int DDataNum = 10; hiprandState_t* states; void UpDData() { // Thread programme to run continuously in background // Will change value of a global counter while(DDataNum>=0) { if (DDataNum==0) { // DDataNum == 0 is flag to cause a full datafile read to DD[0].DD3[3] printf("\n\n ******************Read DDataFile to DD3[3] = %d ",DD[0].DD3[3]); string newername; if (DD[0].IsFVCOM) { newername = NetCDFfiledateG(DD[0].filetemplate,DD); ReadFieldNetCDFG(newername,DD[0].DD3[3],DD,MM); } else { newername = NetCDFfiledate(DD[0].filetemplate,DD); ReadFieldNetCDF(newername,DD[0].DD3[3],DD,MM); } printf("***************** Finished Read DDataFile \n"); cout<< newername << endl<<endl; } DDataNum+=1; //printf(" display i=%d, DDataNum=%d \n",i, DDataNum); this_thread::sleep_for(chrono::milliseconds(2) ); } } void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } //char fps[256]; //sprintf(fps, "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS); //glutSetWindowTitle(fps); } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// bool initGL() { int argc = 1; char *argv[1] = {(char*)"Something"}; glutInit(&argc, argv); //glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Regular NetCDF Particle Tracking"); glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMotionFunc(motion); glutTimerFunc(REFRESH_DELAY, timerEvent,0); // initialize necessary OpenGL extensions if (! 
isGLVersionSupported(2,0)) { fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush(stderr); return false; } // default initialization // background color 234,255,199,1 is noaa map color //glClearColor(234./256,225./256.,199./256.,0.0); // NOAA color glClearColor(234./256.-.2,225./256.-.2,199./256.-.2,0.0); // Darker NOAA color //glClearColor(0.,0.,0.,1.0); // black or grey: 0.0050, 0.005, 0.0050, 1.0); glColor4f(0.0,1.0,0.0,1.0); // set color //glDisable(GL_DEPTH_TEST); // viewport glViewport(0, 0, window_width, window_height); // projection glMatrixMode(GL_PROJECTION); glLoadIdentity(); //gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 20.0); // near and far clipping planes .1,10. SDK_CHECK_ERROR_GL(); return true; } //////////////////////////////////////////////////////////////////////////////// // Initialize a few gl, timer and cuda // then start gl looping to call function display //////////////////////////////////////////////////////////////////////////////// bool GLmoveparticle(struct PPart *PP, struct MMesh *MM, struct DData *DD) { //, struct CCcontrol *CC) //int DD3[4]; // Create the CUTIL timer sdkCreateTimer(&timer); g_time_now = (DD[0].time + DD[1].time)/2.0; Dot_Size = MM[0].Dot_Size; //initial the cudaDevice to use, as if there is a choice? hipDeviceProp_t deviceProp; int devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors(hipSetDevice(devID)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID)); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n" , devID, deviceProp.name, deviceProp.major, deviceProp.minor); // First initialize OpenGL context, so we can properly set the GL for CUDA. // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop. //if (false == initGL(&argc, argv)) if (false == initGL()) { return false; } // register callbacks. these are locally defined functions glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(mouse); glutMotionFunc(motion); #if defined (__APPLE__) || defined(MACOSX) atexit(cleanup); #else glutCloseFunc(cleanup); #endif // create VBO createVBO(&vbo, &cuda_vbo_resource, hipGraphicsMapFlagsWriteDiscard); // routine in MakeShader.cpp Creates gWVPLocation for use by RenderSceneCB() CompileShaders(MM[0].shadervs, MM[0].shaderfs); // Launch UpDData() threaded. Will run in background till end of time std::thread t(UpDData ); //std::thread t1g(ReadFieldNetCDFG, std::ref(newername),std::ref(DD[0].DD3[3]), // std::ref(DD),std::ref(MM) ); // run the cuda part from routine display // specified in glutDisplayFunc(display); // which is triggered by glutMainLoop //runCuda(&cuda_vbo_resource); // start rendering mainloop printf(" Start glutMainLoop >display>runCuda \n\n"); glutMainLoop(); printf(" Return from glutMainLoop\n"); // } return true; } /* this GPU kernel function is used to initialize the random states */ __global__ void initcurand(unsigned int seed, hiprandState_t* states) { int cudaindex = threadIdx.x + blockIdx.x * blockDim.x; /* we have to initialize the state */ hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ cudaindex, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[cudaindex]); } //////////////////////////////////////////////////////////////////////////////// //! 
Run the Cuda part of the computation, called from display //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource) { //printf("TFG runCuda host_P[10].x_present %g\n",host_P[10].x_present); // map OpenGL buffer object for writing from CUDA float4 *dptr; //float4 *dptr2; float time_now; size_t DDSizeGeneral = sizeof(DData)*4; size_t MMSizeGeneral = sizeof(MMesh)*4; if (iDD==-1){ // First update, need to localize DD, MM only once // initialized in ChesPartGL.h, global to this file printf("\n runCuda First Pass\n"); try { printf(" Can I print DD[0].time_now %g\n",DD[0].time_now); } catch (const std::runtime_error& e){ printf(" Error on print DD[0].time_now Message: %s\n",e.what()); } hipMemcpy(DD, dev_DD,DDSizeGeneral,hipMemcpyDeviceToHost); hipMemcpy(MM, dev_MM,MMSizeGeneral,hipMemcpyDeviceToHost); printf(" After hipMemcpy DD[0].time_now %fsec %f hr\n",DD[0].time_now,DD[0].time_now/3600.); iDD=0; // Initialize the damn random number generator // outside hiprandState_t* states; /* allocate space on the GPU for the random states */ hipMalloc((void**) &states, 256*64* sizeof(hiprandState_t)); hipLaunchKernelGGL(( initcurand), dim3(256),dim3(64), 0, 0, 16343, states); } checkCudaErrors(hipGraphicsMapResources(1, vbo_resource, 0)); //1 size_t num_bytes; checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource)); // *vbo_resource //checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&dptr2, &num_bytes, //vbo_resource[1])); //int DD33=1; 256,64 72*32 = 2304 cuda cores // 144,32 all cudas enumerated once hipLaunchKernelGGL(( move3d), dim3(144),dim3(32) , 0, 0, dptr,dev_P,dev_MM,dev_DD,states); hipDeviceSynchronize(); DD[0].time_now += CUDA_STEPS* DT_SEC; // 0.01f; time_now = DD[0].time_now; //printf("After cuda move3d time_now = %fs %ghr\n",time_now, time_now/3600.); float time_frac=(time_now - DD[DD[0].DD3[0]].time)/(DD[DD[0].DD3[2]].time - DD[DD[0].DD3[0]].time); bool timetest = (time_frac > .75); // Dummy counter reset of UpDData flag //if (DDataNum>1000) { // printf("\n\n\n ****************\n RunCuda reset of DDataNum=%d\n ****************\n\n\n",DDataNum); // DDataNum=0; //} if (timetest ){ // Every hour a new data file is needed. Read dev_DD to obtain time_now // Assume or test that the fourth ReadData thread is finished and move to dev_DD hipMemcpy(dev_DD,DD,DDSizeGeneral,hipMemcpyHostToDevice); // Update DD3 for (int i=0; i<4 ; i++)DD[0].DD3[i]=(DD[0].DD3[i]+1)%4; // DD3[3] is next spot to be updated, will be updated in this section // Thread this off to execute while elsewhere. // printf(" DD[# 1].time = %g %g %g %g\n",DD[0].time/3600.,DD[1].time/3600.,DD[2].time/3600.,DD[3].time/3600.); DD[0].ToDay +=3600; // for hourly files string newername; /* if (DD[0].IsFVCOM) {newername = NetCDFfiledateG(DD[0].filetemplate,DD);} else {newername = NetCDFfiledate(DD[0].filetemplate,DD);} //string newername = NetCDFfiledate(DD[0].filetemplate,DD); cout<< newername << endl; //char fps[256]; //strftime(fps,80, "Chesapeake Bay %A %G %b %d %r ", gmtime(&DD[0].ToDay)); //strftime(&fps[80],80, "more Time= %F %R.", gmtime(&MM[0].ToDay)); //glutSetWindowTitle(fps); bool RunThreadRead = false; if (RunThreadRead) { if (DD[0].IsFVCOM) {std::thread t1g(ReadFieldNetCDFG, std::ref(newername),std::ref(DD[0].DD3[3]), std::ref(DD),std::ref(MM) ); t1g.join(); // Wait here for thread to finish. Makes threading moot. Testing only. 
} else { //std::thread & t1; printf(" thread start t1(ReadFieldNetCDF \n"); std::thread t1(ReadFieldNetCDF, std::ref(newername),std::ref(DD[0].DD3[3]), std::ref(DD),std::ref(MM) ); //printf(" thread after t1(ReadFieldNetCDF \n"); t1.join(); // Wait here for thread to finish. Makes threading moot. Testing only. //t1.detach(); // Let it loose, but with no test for finished crashes //printf(" thread after t1.join() \n"); //std::thread t2(ReadFieldNetCDF, std::ref(newername),std::ref(DD[0].DD3[3]), //std::ref(DD),std::ref(MM) ); //t2.join(); //printf("after second join\n"); } } else { if (DD[0].IsFVCOM) { ReadFieldNetCDFG(newername,DD[0].DD3[3],DD,MM); } else { ReadFieldNetCDF(newername,DD[0].DD3[3],DD,MM); } } */ // Reset of UpDData flag to cause call to read data by threaded UpDData() DDataNum=0; printf("\n\n\n ****************\n RunCuda DDataNum=%d\n ****************\n\n\n",DDataNum); float dhr=3600.; printf(" DD[ 0:3].time = %g %g %g %g\n",DD[0].time/dhr,DD[1].time/dhr,DD[2].time/dhr,DD[3].time/dhr); iDD+=1; printf(" iDD = %d time_now=%g\n\n",iDD,time_now/dhr); } // End of hourly DD update char fps[256]; time_t tnow; tnow =DD[0].ToDay; tnow = MM[0].Time_Init + time_now -MM[0].time_init; //strftime(fps,80, "Chesapeake Bay3 %A %G %b %d %I:%M %R ", gmtime(&DD[0].ToDay)); strftime(fps,80, "Chesapeake Bay2 %A %G %b %d %R ", gmtime(&tnow)); //strftime(&fps[80],80, "more Time= %F %R.", gmtime(&DD[0].ToDay)); glutSetWindowTitle(fps); // unmap buffer object checkCudaErrors(hipGraphicsUnmapResources(1, vbo_resource, 0)); } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags) { assert(vbo); // create buffer object glGenBuffers(2, vbo); ////////////////////buffer number [0] //glBindBuffer(GL_ARRAY_BUFFER, *vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo[0]); // initialize buffer object //unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); unsigned size = MAX_GLPARTICLES *4*sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); // register this buffer object with CUDA //checkCudaErrors(hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); /* */ ////////////////////buffer number [1] //glBindBuffer(GL_ARRAY_BUFFER, *vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo[1]); // initialize buffer object //size = mesh_width * mesh_height * 4 * sizeof(float); size = MAX_GLPARTICLES *4*sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); //glBindBuffer(GL_ARRAY_BUFFER, 0); // register this buffer object with CUDA checkCudaErrors(hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); /* */ SDK_CHECK_ERROR_GL(); } //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) { // unregister this buffer object with CUDA checkCudaErrors(hipGraphicsUnregisterResource(vbo_res)); glBindBuffer(1, *vbo); glDeleteBuffers(1, vbo); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! 
Display callback //////////////////////////////////////////////////////////////////////////////// void display() { sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions if (getRunAction()) runCuda(&cuda_vbo_resource); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); //glMatrixMode(GL_PROJECTION); glLoadIdentity(); GLfloat idmatrix[16] = {1.0,0.,0.,0.,0.,1.,0.,0.,0.,0.,1.,0.,0.,0.,0.,1.}; //Identity float *newmatrix; newmatrix = matrix_RotTrPer(idmatrix, rotate_y,rotate_x,0.0, translate_x,translate_y,translate_z, (GLfloat)window_width, (GLfloat) window_height, znear, zfar, 30.0); // gWVPLocation points to the variable "gWVP" in the shader.vs as 4x4matrix glUniformMatrix4fv(gWVPLocation, 1, GL_FALSE, newmatrix); // render from the vbo glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glPointSize(Dot_Size); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LESS); // default was GL_LESS which gave backwards glEnable(GL_POINT_SMOOTH); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); //glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); glDrawArrays(GL_POINTS, 0, MAX_GLPARTICLES); glDisableClientState(GL_VERTEX_ARRAY); glutSwapBuffers(); sdkStopTimer(&timer); computeFPS(); } void timerEvent(int value) { if (glutGetWindow()) { glutPostRedisplay(); glutTimerFunc(REFRESH_DELAY, timerEvent,0); } } void cleanup() { sdkDeleteTimer(&timer); if (vbo) { deleteVBO(&vbo, cuda_vbo_resource); } } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard(unsigned char key, int /*x*/, int /*y*/) { switch (key) { case (27) : #if defined(__APPLE__) || defined(MACOSX) exit(EXIT_SUCCESS); #else glutDestroyWindow(glutGetWindow()); return; #endif case (112) : // p pause { // Set flag to stop cuda move, but keep refreshing screen for mousing // p pause action toggles runaction if (runaction){ runaction=false;} else { runaction=true;} cout <<" Key = "<<key << " runaction "<<runaction<< endl; } break ; case (104) : // h help menu {printf("\nesc = stop\n p = toggle pause\n h = this help\n j = narrow view plane \n k = expand view plane\n"); printf(" Move: w^ s. a< d> \n");} break; case (106) : // j reset view Set matrix in display(). Doesn't really work too good. oh well. { znear = 1.0; zfar = 15.0; //printf(" j narrow view plane %g %g\n",znear, zfar); //gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 1.0, 2.); // near and far clipping planes .1,10. or .1,20. 
} break; case (107) : // k znear contract view { znear += 0.1; // zfar -= 1.0; //printf(" k contract znear view plane %g %g\n",znear, zfar); //gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, .1, 20.); } break; case (108) : // l zfar contract view { // znear += 0.1; zfar -= 1.0; //printf(" l contract zfar view plane %g %g\n",znear, zfar); //gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, .1, 20.); } break; case(119) : // w translate up { translate_y += 0.03f; } break; case(115) : // s translate down { translate_y -= 0.03f; } break; case(97) : // a translate left { translate_x -= 0.03f; } break; case(100) : // d translate right { translate_x += 0.03f; } break; case(114) : // r magnify move away { translate_z -= 0.03f; } break; case(102) : // f shrink move toward { translate_z += 0.03f; } break; } //printf("key = %d\n",key); // p pause is 112 } //////////////////////////////////////////////////////////////////////////////// //! Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; } void motion(int x, int y) { float trans_speed = 0.0006f; float rotate_speed = 0.002f; // linear speed = a abs(z) + c c = 1, a=speed(5)-1)/ 5 trans_speed = trans_speed*(.8*abs(translate_z) +1.); bool printbutton =false ; float dx, dy; dx = (float)(x - mouse_old_x); //if (abs(dx)>.25) dx =0.0; dy = (float)(y - mouse_old_y); //if (abs(dy)>.25) dy = 0.0; if (mouse_buttons & 1) // Rotate around x and y axis pitch and yaw { rotate_x += dx * rotate_speed; rotate_y += dy * rotate_speed; if (printbutton) printf("mouse button 1 rotate x,y %g %g \n",rotate_x,rotate_y); } else if (mouse_buttons & 2) // magnification z axis move push down on scroll button and move mouse { if (printbutton) printf("mouse button 2 translate %g %g %g\n",translate_x,translate_y,translate_z); translate_z += dy * trans_speed; } else if(mouse_buttons & 4) // Translate side to side or up and down { if (printbutton) printf("mouse button 4 %g %g %g\n",translate_x,translate_y,translate_z); translate_x += dx * trans_speed; translate_y -= dy * trans_speed;} else if(mouse_buttons & 3) { if (printbutton) printf("mouse button 3\n");} else if(mouse_buttons & 0) { if (printbutton) printf("mouse button 0\n");} //else // printf(" else mouse button = %d\n",mouse_buttons); mouse_old_x = x; mouse_old_y = y; } //Fancy cuda kernel can be called using dev_P, dev_MM, dev_DD but define it with local names // move<<< >>> ( pos,dev_P,dev_MM,dev_DD); //////////////////////////////////////////////////////////////////////// /////////////////// move3d /////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// //cuda kernel with four meshes and depth for 3d UVW's read from Field files //uses MM[0:2] for the UVW and MM[2] to provide angle and depth // move3d<<< >>> ( pos,dev_P,dev_MM,dev_DD); __global__ void move3d(float4 *pos, struct PPart *PP,struct MMesh *MM, struct DData *DD, hiprandState_t* states){ // Cuda Kernal to move the particles // loop on all particles using cudaindex and stride // for each particle find i_ele, depth angle findiele // interpolate sigma coordinate, find three corner values, average them to PP[iP].xyz // Did that with all three time steps. Time interpolate // Step PP[iP] position forward. 
int IpTest=-250; //int DeBuG = false; // true or false /* real stuff now */ double dt_sec=DT_SEC; //float deg2pi = 3.1415926/180.; // Cuda strides int cudaindex = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; // Main time loop. Loop CUDA_STEPS times between returns for plotting double time_now = DD[0].time_now; // initialize seed for the random macro defined in main.h: // #define RANDP 987.8353*randP - int(987.8353*randP) -.5 //float randP = abs(time_now/.2348579723 - int(time_now/.2348579723)) ; float randP = (32.235643324*time_now + 3.454)-int(32.235643324*time_now + 3.454); randP = .5555; for (int itime=0; itime<CUDA_STEPS; itime++){ for(int Ip = cudaindex; Ip <NUM_PARTICLES; Ip += stride){ // Update Particle information. PP.age PP.state PP[Ip].age++; float Wf = PP[Ip].WfCore; //*cos((time_now- PP[Ip].WfShft)*PP[Ip].WfFreq ); if (time_now > PP[Ip].Release_time && PP[Ip].state==3 ){ // Wait is over start moving PP[Ip].state = 1; // move PP[Ip].x_present = PP[Ip].XYZstart[0]; PP[Ip].y_present = PP[Ip].XYZstart[1]; PP[Ip].z_present = PP[Ip].XYZstart[2]; } if (PP[Ip].age>60*57600 && PP[Ip].state==1){ // Been moving for 2 days, put into wait mode again at end of line PP[Ip].state = 3; //PP[Ip].age += - MAX_GLPARTICLES +3400; // assuming the first batch was separated by 1 dt each PP[Ip].age = - 10800; // two hours PP[Ip].x_present = PP[Ip].XYZstart[0]; PP[Ip].y_present = PP[Ip].XYZstart[1]; PP[Ip].z_present = PP[Ip].XYZstart[2]; } /**/ //This thing was broken, needed to set the i_ele to starting value if (PP[Ip].state == 2 && false) { // grounded particle start right up again PP[Ip].state = 1; // move PP[Ip].age = 0; // Start right up, with age measured from old Release_time + duration it was moving // ie age is always time_now-time_init -Release_time PP[Ip].Release_time = time_now-MM[0].time_init; PP[Ip].x_present = PP[Ip].XYZstart[0]; PP[Ip].y_present = PP[Ip].XYZstart[1]; PP[Ip].z_present = PP[Ip].XYZstart[2]; PP[Ip].i_ele = 55; for (int i=0; i<4; i++) PP[Ip].i_ele4[i] = 55; } /**/ // move particle if (PP[Ip].state == 1 ) { // move particle // Find surrounding triangle of Particle for all three meshes // PP[Ip].i_ele4[iMM] // PP[Ip].factor4[iMM][0:2] for (int iMM=0; iMM<3; iMM++) { findiele(Ip,iMM,PP,MM); } PP[Ip].answer[0]=0.0; if (Ip==IpTest && itime==0) printf(" move3d finished findiele %d\n",itime); // interpolate values for angle and depth at PP[Ip].x,y float VAR[3]; int iMM=2; // mesh for w, angle and depth for (int i=0; i<3; i++) { // i_ele is the element, ele[i_ele[0:2] are the nodes at corners of triangle i_ele long elei = MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]; VAR[i]=MM[iMM].ANGLE[elei]; } if (Ip==IpTest && itime==0) printf("move3d before Interpolate2D findiele itime= %d, time_now= %fs %gh\n",itime,time_now,time_now/3600.); Interpolate2D(Ip,iMM,PP,VAR); float angle=PP[Ip].answer[0]; //or iMM=2; for (int i=0; i<3; i++) VAR[i]=MM[iMM].depth[MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]]; if (Ip==IpTest) printf(" depths = %g %g %g \n",VAR[0],VAR[1],VAR[2]); Interpolate2D(Ip,iMM,PP,VAR); float depth=PP[Ip].answer[0]; if (Ip==IpTest && itime==0) printf("move3d after Interpolate2D angle[%d]=%g depth=%g\n",Ip,angle,depth); // Find zeta, sea surface. 
// Pick out the three DD's to interpolate in time int DDT0=DD[0].DD3[0]; int DDT1=DD[0].DD3[1]; int DDT2=DD[0].DD3[2]; iMM=2; for (int i=0; i<3; i++) VAR[i]=DD[DDT0].zeta[MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]]; Interpolate2D(Ip,iMM,PP,VAR); float Z0=PP[Ip].answer[0]; iMM=2; for (int i=0; i<3; i++) VAR[i]=DD[DDT1].zeta[MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]]; Interpolate2D(Ip,iMM,PP,VAR); float Z1=PP[Ip].answer[0]; iMM=2; for (int i=0; i<3; i++) VAR[i]=DD[DDT2].zeta[MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]]; Interpolate2D(Ip,iMM,PP,VAR); float Z2=PP[Ip].answer[0]; float time_frac=(time_now - DD[DDT0].time)/(DD[DDT2].time - DD[DDT0].time); //float a = 2.*vart[2] -4.*vart[1] +2.*vart[0]; //float b = - vart[2] +4.*vart[1] -3.*vart[0]; //float c = vart[0]; //float Upnow = a*time_frac*time_frac + b*time_frac + c; float ZETA = ( 2.*Z2 -4.*Z1 +2.*Z0)*time_frac*time_frac +( -Z2 +4.*Z1 -3*Z0)*time_frac +( Z0); PP[Ip].Sigma = (ZETA- PP[Ip].z_present)/(ZETA+depth) ; //if ((Ip==17150 || Ip==17151) && itime<2) { // printf("ZETA SIGMA Test i=%d Z= %g, ZETA=%g d=%g Sigma=%g\n" // ,Ip, PP[Ip].z_present, ZETA,depth,PP[Ip].Sigma); } // Find the isigmap, isigmam and sigmafrac // U[iP] = U[isigmap]*sigmafrac +U[isigmam]*(1.0-sigmafrac) // do three times and use timefrac to produce final VAR[3] at corners // iMM = 0U 1V 2W // Only works for UVW. In future add special cases 3T 4S iMM=iMM-1 or -2 iMM=0; Interpolatesigma(Ip, iMM, PP, DD, MM, depth,ZETA,time_now); float Up=PP[Ip].answer[0]; iMM=1; Interpolatesigma(Ip, iMM, PP, DD, MM, depth,ZETA,time_now); float Vp=PP[Ip].answer[0]; float cosa = cos(angle); float sina = sin(angle); float Upnow = cosa*Up -sina*Vp; float Vpnow = sina*Up +cosa*Vp; iMM=2; Interpolatesigma(Ip, iMM, PP, DD, MM, depth,ZETA, time_now); float Wpnow=PP[Ip].answer[0]; if (Ip==IpTest && itime==0) printf("move3d after sigma UVp[%d]= %g %g UVWpnow= %g, %g, %g angle=%g depth=%g\n" ,Ip,Up,Vp,Upnow, Vpnow, Wpnow,angle,depth); /* Now have time and space interpolates of U,V,W for particle */ /* Apply them to the particle coordinates and done! (unless temporal runge kutta is needed. Running goofy small time steps)*/ float KH = MM[0].KH; // Random jiggle 100 / sqrt(3600/1.5) So 3600/1.5 * KH = 100 float KV = MM[0].KV; // Random jiggle 100 / sqrt(3600/1.5) So 3600/1.5 * KH = 100 // KH and KV contain the sqrt(DT_SEC) for time stepping random walk randP = hiprand_normal(&states[cudaindex]); PP[Ip].x_present += dt_sec*(Upnow*1.) +(randP-.0)*KH; randP = hiprand_normal(&states[cudaindex]); PP[Ip].y_present += dt_sec*(Vpnow*1.) 
+(randP-.0)*KH; randP = hiprand_normal(&states[cudaindex]); PP[Ip].z_present += dt_sec*Wpnow*1.0 +Wf +(randP-.0)*KV; PP[Ip].z_present = min(PP[Ip].z_present, ZETA ); // if z_p is above -0.01 PP[Ip].z_present = max(PP[Ip].z_present, -depth); // if z_p is below -depth //if (Ip==17150 && itime<2) { // printf("i=%d ZETA=%g, z_pre=%g\n" // ,Ip, ZETA, PP[Ip].z_present); } // end of if PP[Ip].state = 1 moving particle updated } // End of Particle loop on all Ip } // End of a time step, increment to next time_now += dt_sec; // if time_frac >1, then it will fall out of the loop and not increment PP.timenow time_now+=dt_sec; } // Update the VBO pos[] for(int Ip = cudaindex; Ip <NUM_PARTICLES; Ip += stride){ int Ipx = Ip%MAX_GLPARTICLES; // Not too many and only from moveable points if (PP[Ip].state == 0) {// white boundary // Set ColorClass to 1.0 pos[Ipx] = make_float4(PP[Ip].x_present,PP[Ip].y_present,PP[Ip].z_present, 0.0f); } else if(PP[Ip].state == 2) {// Aground place at zero zero origin // Set ColorClass to 1.0 pos[Ipx] = make_float4(0.0f,0.0f,0.05f, 1.0f); } else if(PP[Ip].state == 1) {// regular moving point // Set ColorClass to float value between 0.0-6.0 // To accommodate states 0 and 2, add 2.0 to push to 2-8 // all modes work with shaderpipe.vs float ColorClass; double NumColors = 6.; if (MM[0].color_mode == 0) { // ColorByRelease double agesec = PP[Ip].Release_time - MM[0].time_init; ColorClass = (agesec/MM[0].age_class) ; if (ColorClass>NumColors) ColorClass=NumColors; } else if (MM[0].color_mode == 1) { // ColorByAge double agesec = time_now- PP[Ip].Release_time; ColorClass = (agesec/MM[0].age_class) ; //% NumColors; if (ColorClass>NumColors) ColorClass=NumColors; } else if (MM[0].color_mode == 2) { // ColorByPulse double agesec = PP[Ip].Release_time - MM[0].time_init; //ColorClass = floor(agesec/(MM[0].pulse_spacing)) ; //% NumColors; ColorClass = agesec/(10.*MM[0].pulse_spacing) ; //% NumColors; while (ColorClass > NumColors) ColorClass-=NumColors; //% NumColors; } else if (MM[0].color_mode == 3) {// ColorByDepth // with Zeta depth can be positive. Make sure code is greater than 1.0 , // 1.0 -> 0.0 interval should be first colorclass. ColorClass = max(1.0-(PP[Ip].z_present/MM[0].depthcolorinterval),0.0); if (ColorClass>NumColors) ColorClass=NumColors; } else if (MM[0].color_mode == 4) {// ColorByOrigin // Really just color by Latitude of XYZstart with offset and scaling in meters ColorClass = (PP[Ip].XYZstart[1]+315000.)/10000.; while (ColorClass < 0.0) ColorClass+=10000.; //% NumColors; while (ColorClass > NumColors) ColorClass-=NumColors; //% NumColors; } else if (MM[0].color_mode == 5) {// ColorBySigma // Sigma values expanded out to 1 : 6. to give 1. 1.5 2. 2.5 etc.. ColorClass = (PP[Ip].Sigma)*NumColors; if (ColorClass < 0.0) ColorClass=0.; //% NumColors; if (ColorClass > NumColors) ColorClass=NumColors; //% NumColors; } // Add 2 to ColorClass so that cases for 0 and 1 are accommodated pos[Ipx] = make_float4(PP[Ip].x_present,PP[Ip].y_present,PP[Ip].z_present, ColorClass+2. 
); } } // end of move() if ( cudaindex==0) DD[0].time_now = time_now; // Only update dev_DD[] once // Hopefully the other cudas have gotten started by now and don't need to read dev_DD[0].time_now } ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// // update which triangle the iP is in for the three meshes iMM 012 UVW // note the iMM=2 also gives iele and factors for ANGLE and depth // Sets PP[Ip].i_ele4[iMM] and the factors PP[Ip].factor4[iMM][0:2] // for (in iMM=0; iMM<3; iMM++) __device__ void findiele(int Ip,int iMM,struct PPart *PP, struct MMesh *MM) { int i_ele, keepgoing, k; float xpart, ypart; float smallest_value = -0.05000; // -.01 -0.001; // Find surrounding triangle of Particle i_ele = PP[Ip].i_ele4[iMM]; xpart = PP[Ip].x_present; ypart = PP[Ip].y_present; //return; //if(Ip==0) printf(" start findiele i_ele=%d \n",i_ele); // Check for out of domain/ grounded particle // do work if in-domain else increment igrounded and skip main part of move if (i_ele >= 0 && PP[Ip].state==1) { keepgoing = 1; while (keepgoing > 0 ){ // if any of the f's are negative, walk that way and restart while loop k=0; PP[Ip].factor4[iMM][k]=MM[iMM].a_frac[i_ele][k]*xpart + MM[iMM].b_frac[i_ele][k]*ypart + MM[iMM].c_frac[i_ele][k]; if ( PP[Ip].factor4[iMM][k] < smallest_value) { i_ele = MM[iMM].tri_connect[i_ele][0]; } else { k=1; PP[Ip].factor4[iMM][k]=MM[iMM].a_frac[i_ele][k]*xpart + MM[iMM].b_frac[i_ele][k]*ypart + MM[iMM].c_frac[i_ele][k]; if ( PP[Ip].factor4[iMM][k] < smallest_value ) { i_ele = MM[iMM].tri_connect[i_ele][1] ; } else { k=2; PP[Ip].factor4[iMM][k]=MM[iMM].a_frac[i_ele][k]*xpart + MM[iMM].b_frac[i_ele][k]*ypart + MM[iMM].c_frac[i_ele][k]; if ( PP[Ip].factor4[iMM][k] < smallest_value ) { i_ele = MM[iMM].tri_connect[i_ele][2] ; } else { // Found it, iele, all f's are positive keepgoing = 0; } } } if (i_ele < 0) { // newly grounded particle, zero him out. PP[Ip].state = 2; // set state = grounded PP[Ip].factor4[iMM][0]=0.0; PP[Ip].factor4[iMM][1]=0.0; PP[Ip].factor4[iMM][2]=0.0; PP[Ip].i_ele4[iMM] = i_ele; keepgoing = 0; } if (keepgoing>0) keepgoing++; if (keepgoing > 7000) { printf(" k%d ",Ip); PP[Ip].state = 2; // set state = grounded i_ele=-1; PP[Ip].i_ele4[iMM] = -1; PP[Ip].x_present=0.0; PP[Ip].y_present=0.0; PP[Ip].z_present=0.0; keepgoing=0;} } //return; if (i_ele>=0){ // good particle still in the mesh PP[Ip].i_ele4[iMM]=i_ele;} // end of while keepgoing // did it finish in a good element? if not !good ground it. 
// if (MM[iMM].goodele[i_ele]) PP[Ip].state = 2; } return; } ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// // find 2d interpolated ANGLE(icase==0), depth(icase==1) // input is X,Y, A of i_ele points along with factor4 // MM[iMM].X[i_ele4[0:2]] MM[iMM].Y[i_ele4[0:2]] MM[iMM].ANGLE[i_ele4[0:2]] // PP[Ip].factor4[iMM][i_ele[0:]] // 2Dinterpolate(Ip,iMM,PP,MM,icase); // icase = 0U, 1V, 2W, 3ANGLE, 4depth // maybe do VAR[3] = MM[iMM].ANGLE[i_ele4[0:2]] instead of icase // That way we can feed it the vertical interpolates of UVW[3] //float VAR[3]; //iMM=3; for (int i=0; i<3; i++) VAR[i]=MM[iMM].angle[PP[Ip].iele4[iMM][i]]; //float angle = 2Dinterpolate(Ip,iMM,PP,MM,VAR); //iMM=4; for (int i=0; i<3; i++) VAR[i]=MM[iMM].depth[PP[Ip].iele4[iMM][i]]; //float depth = 2Dinterpolate(Ip,iMM,PP,MM,VAR); __device__ void Interpolate2D(int Ip, int iMM, struct PPart *PP, float *VAR) { float factor0=PP[Ip].factor4[iMM][0]; float factor1=PP[Ip].factor4[iMM][1]; float factor2=PP[Ip].factor4[iMM][2]; PP[Ip].answer[0] = factor0*VAR[0]+factor1*VAR[1]+factor2*VAR[2]; } ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// __device__ void Interpolatesigma(int Ip, int iMM, struct PPart *PP, struct DData *DD, struct MMesh *MM, float depth, float ZETA, float time_now ) { // Find the isigmap, isigmam and sigmafrac // U[iP] = U[isigmap]*sigmafrac +U[isigmam]*(1.0-sigmafrac) // do three times and use timefrac to produce final VAR[3] at corners // iMM = 0U 1V 2W // Only works for UVW. In future add special cases 3T 4S iMM=iMM-1 or -2 int DDT0, DDT2; int IpTest = -250; int i_ele = PP[Ip].i_ele4[iMM]; float vart[3]; float var[3]; int sp, sm; float sigIp = max( min(PP[Ip].z_present / (depth-ZETA) , -0.01) , -0.99); // 0 to -1.0 //float sigIp = PP[Ip].z_present / depth ; // 0 to -1.0 // count up in sp to walk down in depth sp=1; while(MM[iMM].sigma[sp]< sigIp) sp++; // increment if sp is still above sigIp sm = sp-1; // sp is below sigIp, sm is above float sigfrac = (sigIp-MM[iMM].sigma[sp])/(MM[iMM].sigma[sm]- MM[iMM].sigma[sp]); // Pick out the three DD's to interpolate in time int DD3[3]; DD3[0]=DD[0].DD3[0]; DD3[1]=DD[0].DD3[1]; DD3[2]=DD[0].DD3[2]; int DDT0=DD3[0]; //DDT1=DD3[1]; int DDT2=DD3[2]; if (Ip==IpTest ) printf(" start of interpretsigma iMM=%d z_present= %g /depth=%g =sigIP = %g \n sm,sp sigma[%d]=%g sigma[%d]=%g sigIP %g sigfrac %g\n" ,iMM,PP[Ip].z_present, depth,sigIp,sm,MM[iMM].sigma[sm],sp,MM[iMM].sigma[sp],sigIp,sigfrac); // loop on time DD3[i] // loop on three corners ele[i_ele][j] // average sm and sp at the corner for (int it=0; it<3; it++){ // time loop for DD3[it] for (int j=0; j<3; j++){ // loop around corners to get sigma averaged variable long ele0=MM[iMM].ele[i_ele][j]; if (iMM==0){ // U var[j] = DD[DD3[it]].U[sm][ele0]*sigfrac + DD[DD3[it]].U[sp][ele0]* (1.0 - sigfrac); } else if (iMM==1){ // V var[j] = DD[DD3[it]].V[sm][ele0]*sigfrac + DD[DD3[it]].V[sp][ele0]* (1.0 - sigfrac); } else if (iMM==2){ // W var[j] = DD[DD3[it]].W[sm][ele0]*sigfrac + DD[DD3[it]].W[sp][ele0]* (1.0 - sigfrac); } else { printf(" \n\n Bad iMM in Interpolatesigma %d\n\n",iMM); } } // Have sigma average var[0:2] at the three corners //interpolate to center, to get three time increments vart[0:2] vart[it]= PP[Ip].factor4[iMM][0]*var[0] + 
PP[Ip].factor4[iMM][1]*var[1] + PP[Ip].factor4[iMM][2]*var[2]; if (Ip==IpTest ) printf(" intersig DD3=%d var=%g %g %g vart=%g\n " ,DD3[it],var[0],var[1],var[2],vart[it]); } // Finally interpolate in time to get final answer for U, V, W to mover PP[Ip] // float time_now = DD[0].time_now; // Will use dev_DD after the first pass with new DD float time_frac=(time_now - DD[DDT0].time)/(DD[DDT2].time - DD[DDT0].time); //float a = 2.*vart[2] -4.*vart[1] +2.*vart[0]; //float b = - vart[2] +4.*vart[1] -3.*vart[0]; //float c = vart[0]; //float Upnow = a*time_frac*time_frac + b*time_frac + c; float Upnow = ( 2.*vart[2] -4.*vart[1] +2.*vart[0])*time_frac*time_frac +(- vart[2] +4.*vart[1] -3.*vart[0])*time_frac +( vart[0]); /* Now have time sigma and space interpolates of U,V,W for particle */ PP[Ip].answer[0] = Upnow; if (Ip==IpTest ) printf(" intersigend timenow=%fs timefrac=%g Upnow=%g\n ",time_now,time_frac,Upnow); }
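Interpolatesigma() and the zeta interpolation above blend the three buffered time levels with a quadratic in time_frac. A small standalone check (QuadraticTimeBlend is a hypothetical helper with made-up sample values, host-only C++) confirming that the polynomial used in the kernel passes through vart[0], vart[1] and vart[2] at time_frac = 0, 0.5 and 1:

// Sketch only: the quadratic time blend from Interpolatesigma(), verified
// against its three sample points.
#include <cassert>
#include <cmath>

float QuadraticTimeBlend(const float vart[3], float f)
{
    return ( 2.f*vart[2] - 4.f*vart[1] + 2.f*vart[0]) * f * f
         + (   -vart[2] + 4.f*vart[1] - 3.f*vart[0]) * f
         +   vart[0];
}

int main()
{
    const float v[3] = {1.0f, 4.0f, 9.0f};
    assert(std::fabs(QuadraticTimeBlend(v, 0.0f) - 1.0f) < 1e-6f);  // vart[0] at f=0
    assert(std::fabs(QuadraticTimeBlend(v, 0.5f) - 4.0f) < 1e-6f);  // vart[1] at f=0.5
    assert(std::fabs(QuadraticTimeBlend(v, 1.0f) - 9.0f) < 1e-6f);  // vart[2] at f=1
    return 0;
}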
e3ca6336eaf82125e92c68776749901507256658.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the Cuda OpenGL bindings to dynamically modify a vertex buffer using a Cuda kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with Cuda 3. Map the VBO for writing from Cuda 4. Run Cuda kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL Host code */ /* ChesPartGL.h and ChesPartGL.cu are included at top of mainpart.cu #include "ChesPartGL.h" */ #include <typeinfo> // MakeShader.cpp bool ReadFileNew(string pFileName, string& outFile); static void AddShader(GLuint ShaderProgram, const char* pShaderText, GLenum ShaderType); void CompileShaders(string vs, string fs); void transmatrix(float *matrix, float tx,float ty, float tz); // flag for pausing motion in cudamove bool runaction=true; bool getRunAction(){ return runaction; } // thread method to be developed for disk reading //std::thread t12 ; void UpDData(); int DDataNum = 10; curandState_t* states; void UpDData() { // Thread programme to run continuously in background // Will change value of a global counter while(DDataNum>=0) { if (DDataNum==0) { // DDataNum == 0 is flag to cause a full datafile read to DD[0].DD3[3] printf("\n\n ******************Read DDataFile to DD3[3] = %d ",DD[0].DD3[3]); string newername; if (DD[0].IsFVCOM) { newername = NetCDFfiledateG(DD[0].filetemplate,DD); ReadFieldNetCDFG(newername,DD[0].DD3[3],DD,MM); } else { newername = NetCDFfiledate(DD[0].filetemplate,DD); ReadFieldNetCDF(newername,DD[0].DD3[3],DD,MM); } printf("***************** Finished Read DDataFile \n"); cout<< newername << endl<<endl; } DDataNum+=1; //printf(" display i=%d, DDataNum=%d \n",i, DDataNum); this_thread::sleep_for(chrono::milliseconds(2) ); } } void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } //char fps[256]; //sprintf(fps, "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS); //glutSetWindowTitle(fps); } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// bool initGL() { int argc = 1; char *argv[1] = {(char*)"Something"}; glutInit(&argc, argv); //glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Regular NetCDF Particle Tracking"); glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMotionFunc(motion); glutTimerFunc(REFRESH_DELAY, timerEvent,0); // initialize necessary OpenGL extensions if (! 
isGLVersionSupported(2,0)) { fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush(stderr); return false; } // default initialization // background color 234,255,199,1 is noaa map color //glClearColor(234./256,225./256.,199./256.,0.0); // NOAA color glClearColor(234./256.-.2,225./256.-.2,199./256.-.2,0.0); // Darker NOAA color //glClearColor(0.,0.,0.,1.0); // black or grey: 0.0050, 0.005, 0.0050, 1.0); glColor4f(0.0,1.0,0.0,1.0); // set color //glDisable(GL_DEPTH_TEST); // viewport glViewport(0, 0, window_width, window_height); // projection glMatrixMode(GL_PROJECTION); glLoadIdentity(); //gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 20.0); // near and far clipping planes .1,10. SDK_CHECK_ERROR_GL(); return true; } //////////////////////////////////////////////////////////////////////////////// // Initialize a few gl, timer and cuda // then start gl looping to call function display //////////////////////////////////////////////////////////////////////////////// bool GLmoveparticle(struct PPart *PP, struct MMesh *MM, struct DData *DD) { //, struct CCcontrol *CC) //int DD3[4]; // Create the CUTIL timer sdkCreateTimer(&timer); g_time_now = (DD[0].time + DD[1].time)/2.0; Dot_Size = MM[0].Dot_Size; //initial the cudaDevice to use, as if there is a choice? cudaDeviceProp deviceProp; int devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors(cudaSetDevice(devID)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID)); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n" , devID, deviceProp.name, deviceProp.major, deviceProp.minor); // First initialize OpenGL context, so we can properly set the GL for CUDA. // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop. //if (false == initGL(&argc, argv)) if (false == initGL()) { return false; } // register callbacks. these are locally defined functions glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(mouse); glutMotionFunc(motion); #if defined (__APPLE__) || defined(MACOSX) atexit(cleanup); #else glutCloseFunc(cleanup); #endif // create VBO createVBO(&vbo, &cuda_vbo_resource, cudaGraphicsMapFlagsWriteDiscard); // routine in MakeShader.cpp Creates gWVPLocation for use by RenderSceneCB() CompileShaders(MM[0].shadervs, MM[0].shaderfs); // Launch UpDData() threaded. Will run in background till end of time std::thread t(UpDData ); //std::thread t1g(ReadFieldNetCDFG, std::ref(newername),std::ref(DD[0].DD3[3]), // std::ref(DD),std::ref(MM) ); // run the cuda part from routine display // specified in glutDisplayFunc(display); // which is triggered by glutMainLoop //runCuda(&cuda_vbo_resource); // start rendering mainloop printf(" Start glutMainLoop >display>runCuda \n\n"); glutMainLoop(); printf(" Return from glutMainLoop\n"); // } return true; } /* this GPU kernel function is used to initialize the random states */ __global__ void initcurand(unsigned int seed, curandState_t* states) { int cudaindex = threadIdx.x + blockIdx.x * blockDim.x; /* we have to initialize the state */ curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ cudaindex, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[cudaindex]); } //////////////////////////////////////////////////////////////////////////////// //! 
Run the Cuda part of the computation, called from display //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource) { //printf("TFG runCuda host_P[10].x_present %g\n",host_P[10].x_present); // map OpenGL buffer object for writing from CUDA float4 *dptr; //float4 *dptr2; float time_now; size_t DDSizeGeneral = sizeof(DData)*4; size_t MMSizeGeneral = sizeof(MMesh)*4; if (iDD==-1){ // First update, need to localize DD, MM only once // initialized in ChesPartGL.h, global to this file printf("\n runCuda First Pass\n"); try { printf(" Can I print DD[0].time_now %g\n",DD[0].time_now); } catch (const std::runtime_error& e){ printf(" Error on print DD[0].time_now Message: %s\n",e.what()); } cudaMemcpy(DD, dev_DD,DDSizeGeneral,cudaMemcpyDeviceToHost); cudaMemcpy(MM, dev_MM,MMSizeGeneral,cudaMemcpyDeviceToHost); printf(" After cudaMemcpy DD[0].time_now %fsec %f hr\n",DD[0].time_now,DD[0].time_now/3600.); iDD=0; // Initialize the damn random number generator // outside curandState_t* states; /* allocate space on the GPU for the random states */ cudaMalloc((void**) &states, 256*64* sizeof(curandState_t)); initcurand<<<256,64>>>(16343, states); } checkCudaErrors(cudaGraphicsMapResources(1, vbo_resource, 0)); //1 size_t num_bytes; checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource)); // *vbo_resource //checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&dptr2, &num_bytes, //vbo_resource[1])); //int DD33=1; 256,64 72*32 = 2304 cuda cores // 144,32 all cudas enumerated once move3d<<< 144,32 >>>(dptr,dev_P,dev_MM,dev_DD,states); cudaDeviceSynchronize(); DD[0].time_now += CUDA_STEPS* DT_SEC; // 0.01f; time_now = DD[0].time_now; //printf("After cuda move3d time_now = %fs %ghr\n",time_now, time_now/3600.); float time_frac=(time_now - DD[DD[0].DD3[0]].time)/(DD[DD[0].DD3[2]].time - DD[DD[0].DD3[0]].time); bool timetest = (time_frac > .75); // Dummy counter reset of UpDData flag //if (DDataNum>1000) { // printf("\n\n\n ****************\n RunCuda reset of DDataNum=%d\n ****************\n\n\n",DDataNum); // DDataNum=0; //} if (timetest ){ // Every hour a new data file is needed. Read dev_DD to obtain time_now // Assume or test that the fourth ReadData thread is finished and move to dev_DD cudaMemcpy(dev_DD,DD,DDSizeGeneral,cudaMemcpyHostToDevice); // Update DD3 for (int i=0; i<4 ; i++)DD[0].DD3[i]=(DD[0].DD3[i]+1)%4; // DD3[3] is next spot to be updated, will be updated in this section // Thread this off to execute while elsewhere. // printf(" DD[# 1].time = %g %g %g %g\n",DD[0].time/3600.,DD[1].time/3600.,DD[2].time/3600.,DD[3].time/3600.); DD[0].ToDay +=3600; // for hourly files string newername; /* if (DD[0].IsFVCOM) {newername = NetCDFfiledateG(DD[0].filetemplate,DD);} else {newername = NetCDFfiledate(DD[0].filetemplate,DD);} //string newername = NetCDFfiledate(DD[0].filetemplate,DD); cout<< newername << endl; //char fps[256]; //strftime(fps,80, "Chesapeake Bay %A %G %b %d %r ", gmtime(&DD[0].ToDay)); //strftime(&fps[80],80, "more Time= %F %R.", gmtime(&MM[0].ToDay)); //glutSetWindowTitle(fps); bool RunThreadRead = false; if (RunThreadRead) { if (DD[0].IsFVCOM) {std::thread t1g(ReadFieldNetCDFG, std::ref(newername),std::ref(DD[0].DD3[3]), std::ref(DD),std::ref(MM) ); t1g.join(); // Wait here for thread to finish. Makes threading moot. Testing only. 
} else { //std::thread & t1; printf(" thread start t1(ReadFieldNetCDF \n"); std::thread t1(ReadFieldNetCDF, std::ref(newername),std::ref(DD[0].DD3[3]), std::ref(DD),std::ref(MM) ); //printf(" thread after t1(ReadFieldNetCDF \n"); t1.join(); // Wait here for thread to finish. Makes threading moot. Testing only. //t1.detach(); // Let it loose, but with no test for finished crashes //printf(" thread after t1.join() \n"); //std::thread t2(ReadFieldNetCDF, std::ref(newername),std::ref(DD[0].DD3[3]), //std::ref(DD),std::ref(MM) ); //t2.join(); //printf("after second join\n"); } } else { if (DD[0].IsFVCOM) { ReadFieldNetCDFG(newername,DD[0].DD3[3],DD,MM); } else { ReadFieldNetCDF(newername,DD[0].DD3[3],DD,MM); } } */ // Reset of UpDData flag to cause call to read data by threaded UpDData() DDataNum=0; printf("\n\n\n ****************\n RunCuda DDataNum=%d\n ****************\n\n\n",DDataNum); float dhr=3600.; printf(" DD[ 0:3].time = %g %g %g %g\n",DD[0].time/dhr,DD[1].time/dhr,DD[2].time/dhr,DD[3].time/dhr); iDD+=1; printf(" iDD = %d time_now=%g\n\n",iDD,time_now/dhr); } // End of hourly DD update char fps[256]; time_t tnow; tnow =DD[0].ToDay; tnow = MM[0].Time_Init + time_now -MM[0].time_init; //strftime(fps,80, "Chesapeake Bay3 %A %G %b %d %I:%M %R ", gmtime(&DD[0].ToDay)); strftime(fps,80, "Chesapeake Bay2 %A %G %b %d %R ", gmtime(&tnow)); //strftime(&fps[80],80, "more Time= %F %R.", gmtime(&DD[0].ToDay)); glutSetWindowTitle(fps); // unmap buffer object checkCudaErrors(cudaGraphicsUnmapResources(1, vbo_resource, 0)); } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags) { assert(vbo); // create buffer object glGenBuffers(2, vbo); ////////////////////buffer number [0] //glBindBuffer(GL_ARRAY_BUFFER, *vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo[0]); // initialize buffer object //unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); unsigned size = MAX_GLPARTICLES *4*sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); // register this buffer object with CUDA //checkCudaErrors(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); /* */ ////////////////////buffer number [1] //glBindBuffer(GL_ARRAY_BUFFER, *vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo[1]); // initialize buffer object //size = mesh_width * mesh_height * 4 * sizeof(float); size = MAX_GLPARTICLES *4*sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); //glBindBuffer(GL_ARRAY_BUFFER, 0); // register this buffer object with CUDA checkCudaErrors(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); /* */ SDK_CHECK_ERROR_GL(); } //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) { // unregister this buffer object with CUDA checkCudaErrors(cudaGraphicsUnregisterResource(vbo_res)); glBindBuffer(1, *vbo); glDeleteBuffers(1, vbo); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! 
Display callback //////////////////////////////////////////////////////////////////////////////// void display() { sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions if (getRunAction()) runCuda(&cuda_vbo_resource); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); //glMatrixMode(GL_PROJECTION); glLoadIdentity(); GLfloat idmatrix[16] = {1.0,0.,0.,0.,0.,1.,0.,0.,0.,0.,1.,0.,0.,0.,0.,1.}; //Identity float *newmatrix; newmatrix = matrix_RotTrPer(idmatrix, rotate_y,rotate_x,0.0, translate_x,translate_y,translate_z, (GLfloat)window_width, (GLfloat) window_height, znear, zfar, 30.0); // gWVPLocation points to the variable "gWVP" in the shader.vs as 4x4matrix glUniformMatrix4fv(gWVPLocation, 1, GL_FALSE, newmatrix); // render from the vbo glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glPointSize(Dot_Size); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LESS); // default was GL_LESS which gave backwards glEnable(GL_POINT_SMOOTH); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); //glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); glDrawArrays(GL_POINTS, 0, MAX_GLPARTICLES); glDisableClientState(GL_VERTEX_ARRAY); glutSwapBuffers(); sdkStopTimer(&timer); computeFPS(); } void timerEvent(int value) { if (glutGetWindow()) { glutPostRedisplay(); glutTimerFunc(REFRESH_DELAY, timerEvent,0); } } void cleanup() { sdkDeleteTimer(&timer); if (vbo) { deleteVBO(&vbo, cuda_vbo_resource); } } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard(unsigned char key, int /*x*/, int /*y*/) { switch (key) { case (27) : #if defined(__APPLE__) || defined(MACOSX) exit(EXIT_SUCCESS); #else glutDestroyWindow(glutGetWindow()); return; #endif case (112) : // p pause { // Set flag to stop cuda move, but keep refreshing screen for mousing // p pause action toggles runaction if (runaction){ runaction=false;} else { runaction=true;} cout <<" Key = "<<key << " runaction "<<runaction<< endl; } break ; case (104) : // h help menu {printf("\nesc = stop\n p = toggle pause\n h = this help\n j = narrow view plane \n k = expand view plane\n"); printf(" Move: w^ s. a< d> \n");} break; case (106) : // j reset view Set matrix in display(). Doesn't really work too good. oh well. { znear = 1.0; zfar = 15.0; //printf(" j narrow view plane %g %g\n",znear, zfar); //gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 1.0, 2.); // near and far clipping planes .1,10. or .1,20. 
} break; case (107) : // k znear contract view { znear += 0.1; // zfar -= 1.0; //printf(" k contract znear view plane %g %g\n",znear, zfar); //gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, .1, 20.); } break; case (108) : // l zfar contract view { // znear += 0.1; zfar -= 1.0; //printf(" l contract zfar view plane %g %g\n",znear, zfar); //gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, .1, 20.); } break; case(119) : // w translate up { translate_y += 0.03f; } break; case(115) : // s translate down { translate_y -= 0.03f; } break; case(97) : // a translate left { translate_x -= 0.03f; } break; case(100) : // d translate right { translate_x += 0.03f; } break; case(114) : // r magnify move away { translate_z -= 0.03f; } break; case(102) : // f shrink move toward { translate_z += 0.03f; } break; } //printf("key = %d\n",key); // p pause is 112 } //////////////////////////////////////////////////////////////////////////////// //! Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; } void motion(int x, int y) { float trans_speed = 0.0006f; float rotate_speed = 0.002f; // linear speed = a abs(z) + c c = 1, a=speed(5)-1)/ 5 trans_speed = trans_speed*(.8*abs(translate_z) +1.); bool printbutton =false ; float dx, dy; dx = (float)(x - mouse_old_x); //if (abs(dx)>.25) dx =0.0; dy = (float)(y - mouse_old_y); //if (abs(dy)>.25) dy = 0.0; if (mouse_buttons & 1) // Rotate around x and y axis pitch and yaw { rotate_x += dx * rotate_speed; rotate_y += dy * rotate_speed; if (printbutton) printf("mouse button 1 rotate x,y %g %g \n",rotate_x,rotate_y); } else if (mouse_buttons & 2) // magnification z axis move push down on scroll button and move mouse { if (printbutton) printf("mouse button 2 translate %g %g %g\n",translate_x,translate_y,translate_z); translate_z += dy * trans_speed; } else if(mouse_buttons & 4) // Translate side to side or up and down { if (printbutton) printf("mouse button 4 %g %g %g\n",translate_x,translate_y,translate_z); translate_x += dx * trans_speed; translate_y -= dy * trans_speed;} else if(mouse_buttons & 3) { if (printbutton) printf("mouse button 3\n");} else if(mouse_buttons & 0) { if (printbutton) printf("mouse button 0\n");} //else // printf(" else mouse button = %d\n",mouse_buttons); mouse_old_x = x; mouse_old_y = y; } //Fancy cuda kernel can be called using dev_P, dev_MM, dev_DD but define it with local names // move<<< >>> ( pos,dev_P,dev_MM,dev_DD); //////////////////////////////////////////////////////////////////////// /////////////////// move3d /////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// //cuda kernel with four meshes and depth for 3d UVW's read from Field files //uses MM[0:2] for the UVW and MM[2] to provide angle and depth // move3d<<< >>> ( pos,dev_P,dev_MM,dev_DD); __global__ void move3d(float4 *pos, struct PPart *PP,struct MMesh *MM, struct DData *DD, curandState_t* states){ // Cuda Kernal to move the particles // loop on all particles using cudaindex and stride // for each particle find i_ele, depth angle findiele // interpolate sigma coordinate, find three corner values, average them to PP[iP].xyz // Did that with all three time steps. Time interpolate // Step PP[iP] position forward. 
int IpTest=-250; //int DeBuG = false; // true or false /* real stuff now */ double dt_sec=DT_SEC; //float deg2pi = 3.1415926/180.; // Cuda strides int cudaindex = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; // Main time loop. Loop CUDA_STEPS times between returns for plotting double time_now = DD[0].time_now; // initialize seed for the random macro defined in main.h: // #define RANDP 987.8353*randP - int(987.8353*randP) -.5 //float randP = abs(time_now/.2348579723 - int(time_now/.2348579723)) ; float randP = (32.235643324*time_now + 3.454)-int(32.235643324*time_now + 3.454); randP = .5555; for (int itime=0; itime<CUDA_STEPS; itime++){ for(int Ip = cudaindex; Ip <NUM_PARTICLES; Ip += stride){ // Update Particle information. PP.age PP.state PP[Ip].age++; float Wf = PP[Ip].WfCore; //*cos((time_now- PP[Ip].WfShft)*PP[Ip].WfFreq ); if (time_now > PP[Ip].Release_time && PP[Ip].state==3 ){ // Wait is over start moving PP[Ip].state = 1; // move PP[Ip].x_present = PP[Ip].XYZstart[0]; PP[Ip].y_present = PP[Ip].XYZstart[1]; PP[Ip].z_present = PP[Ip].XYZstart[2]; } if (PP[Ip].age>60*57600 && PP[Ip].state==1){ // Been moving for 2 days, put into wait mode again at end of line PP[Ip].state = 3; //PP[Ip].age += - MAX_GLPARTICLES +3400; // assuming the first batch was separated by 1 dt each PP[Ip].age = - 10800; // two hours PP[Ip].x_present = PP[Ip].XYZstart[0]; PP[Ip].y_present = PP[Ip].XYZstart[1]; PP[Ip].z_present = PP[Ip].XYZstart[2]; } /**/ //This thing was broken, needed to set the i_ele to starting value if (PP[Ip].state == 2 && false) { // grounded particle start right up again PP[Ip].state = 1; // move PP[Ip].age = 0; // Start right up, with age measured from old Release_time + duration it was moving // ie age is always time_now-time_init -Release_time PP[Ip].Release_time = time_now-MM[0].time_init; PP[Ip].x_present = PP[Ip].XYZstart[0]; PP[Ip].y_present = PP[Ip].XYZstart[1]; PP[Ip].z_present = PP[Ip].XYZstart[2]; PP[Ip].i_ele = 55; for (int i=0; i<4; i++) PP[Ip].i_ele4[i] = 55; } /**/ // move particle if (PP[Ip].state == 1 ) { // move particle // Find surrounding triangle of Particle for all three meshes // PP[Ip].i_ele4[iMM] // PP[Ip].factor4[iMM][0:2] for (int iMM=0; iMM<3; iMM++) { findiele(Ip,iMM,PP,MM); } PP[Ip].answer[0]=0.0; if (Ip==IpTest && itime==0) printf(" move3d finished findiele %d\n",itime); // interpolate values for angle and depth at PP[Ip].x,y float VAR[3]; int iMM=2; // mesh for w, angle and depth for (int i=0; i<3; i++) { // i_ele is the element, ele[i_ele[0:2] are the nodes at corners of triangle i_ele long elei = MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]; VAR[i]=MM[iMM].ANGLE[elei]; } if (Ip==IpTest && itime==0) printf("move3d before Interpolate2D findiele itime= %d, time_now= %fs %gh\n",itime,time_now,time_now/3600.); Interpolate2D(Ip,iMM,PP,VAR); float angle=PP[Ip].answer[0]; //or iMM=2; for (int i=0; i<3; i++) VAR[i]=MM[iMM].depth[MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]]; if (Ip==IpTest) printf(" depths = %g %g %g \n",VAR[0],VAR[1],VAR[2]); Interpolate2D(Ip,iMM,PP,VAR); float depth=PP[Ip].answer[0]; if (Ip==IpTest && itime==0) printf("move3d after Interpolate2D angle[%d]=%g depth=%g\n",Ip,angle,depth); // Find zeta, sea surface. 
// Pick out the three DD's to interpolate in time int DDT0=DD[0].DD3[0]; int DDT1=DD[0].DD3[1]; int DDT2=DD[0].DD3[2]; iMM=2; for (int i=0; i<3; i++) VAR[i]=DD[DDT0].zeta[MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]]; Interpolate2D(Ip,iMM,PP,VAR); float Z0=PP[Ip].answer[0]; iMM=2; for (int i=0; i<3; i++) VAR[i]=DD[DDT1].zeta[MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]]; Interpolate2D(Ip,iMM,PP,VAR); float Z1=PP[Ip].answer[0]; iMM=2; for (int i=0; i<3; i++) VAR[i]=DD[DDT2].zeta[MM[iMM].ele[PP[Ip].i_ele4[iMM]][i]]; Interpolate2D(Ip,iMM,PP,VAR); float Z2=PP[Ip].answer[0]; float time_frac=(time_now - DD[DDT0].time)/(DD[DDT2].time - DD[DDT0].time); //float a = 2.*vart[2] -4.*vart[1] +2.*vart[0]; //float b = - vart[2] +4.*vart[1] -3.*vart[0]; //float c = vart[0]; //float Upnow = a*time_frac*time_frac + b*time_frac + c; float ZETA = ( 2.*Z2 -4.*Z1 +2.*Z0)*time_frac*time_frac +( -Z2 +4.*Z1 -3*Z0)*time_frac +( Z0); PP[Ip].Sigma = (ZETA- PP[Ip].z_present)/(ZETA+depth) ; //if ((Ip==17150 || Ip==17151) && itime<2) { // printf("ZETA SIGMA Test i=%d Z= %g, ZETA=%g d=%g Sigma=%g\n" // ,Ip, PP[Ip].z_present, ZETA,depth,PP[Ip].Sigma); } // Find the isigmap, isigmam and sigmafrac // U[iP] = U[isigmap]*sigmafrac +U[isigmam]*(1.0-sigmafrac) // do three times and use timefrac to produce final VAR[3] at corners // iMM = 0U 1V 2W // Only works for UVW. In future add special cases 3T 4S iMM=iMM-1 or -2 iMM=0; Interpolatesigma(Ip, iMM, PP, DD, MM, depth,ZETA,time_now); float Up=PP[Ip].answer[0]; iMM=1; Interpolatesigma(Ip, iMM, PP, DD, MM, depth,ZETA,time_now); float Vp=PP[Ip].answer[0]; float cosa = cos(angle); float sina = sin(angle); float Upnow = cosa*Up -sina*Vp; float Vpnow = sina*Up +cosa*Vp; iMM=2; Interpolatesigma(Ip, iMM, PP, DD, MM, depth,ZETA, time_now); float Wpnow=PP[Ip].answer[0]; if (Ip==IpTest && itime==0) printf("move3d after sigma UVp[%d]= %g %g UVWpnow= %g, %g, %g angle=%g depth=%g\n" ,Ip,Up,Vp,Upnow, Vpnow, Wpnow,angle,depth); /* Now have time and space interpolates of U,V,W for particle */ /* Apply them to the particle coordinates and done! (unless temporal runge kutta is needed. Running goofy small time steps)*/ float KH = MM[0].KH; // Random jiggle 100 / sqrt(3600/1.5) So 3600/1.5 * KH = 100 float KV = MM[0].KV; // Random jiggle 100 / sqrt(3600/1.5) So 3600/1.5 * KH = 100 // KH and KV contain the sqrt(DT_SEC) for time stepping random walk randP = curand_normal(&states[cudaindex]); PP[Ip].x_present += dt_sec*(Upnow*1.) +(randP-.0)*KH; randP = curand_normal(&states[cudaindex]); PP[Ip].y_present += dt_sec*(Vpnow*1.) 
+(randP-.0)*KH; randP = curand_normal(&states[cudaindex]); PP[Ip].z_present += dt_sec*Wpnow*1.0 +Wf +(randP-.0)*KV; PP[Ip].z_present = min(PP[Ip].z_present, ZETA ); // if z_p is above -0.01 PP[Ip].z_present = max(PP[Ip].z_present, -depth); // if z_p is below -depth //if (Ip==17150 && itime<2) { // printf("i=%d ZETA=%g, z_pre=%g\n" // ,Ip, ZETA, PP[Ip].z_present); } // end of if PP[Ip].state = 1 moving particle updated } // End of Particle loop on all Ip } // End of a time step, increment to next time_now += dt_sec; // if time_frac >1, then it will fall out of the loop and not increment PP.timenow time_now+=dt_sec; } // Update the VBO pos[] for(int Ip = cudaindex; Ip <NUM_PARTICLES; Ip += stride){ int Ipx = Ip%MAX_GLPARTICLES; // Not too many and only from moveable points if (PP[Ip].state == 0) {// white boundary // Set ColorClass to 1.0 pos[Ipx] = make_float4(PP[Ip].x_present,PP[Ip].y_present,PP[Ip].z_present, 0.0f); } else if(PP[Ip].state == 2) {// Aground place at zero zero origin // Set ColorClass to 1.0 pos[Ipx] = make_float4(0.0f,0.0f,0.05f, 1.0f); } else if(PP[Ip].state == 1) {// regular moving point // Set ColorClass to float value between 0.0-6.0 // To accommodate states 0 and 2, add 2.0 to push to 2-8 // all modes work with shaderpipe.vs float ColorClass; double NumColors = 6.; if (MM[0].color_mode == 0) { // ColorByRelease double agesec = PP[Ip].Release_time - MM[0].time_init; ColorClass = (agesec/MM[0].age_class) ; if (ColorClass>NumColors) ColorClass=NumColors; } else if (MM[0].color_mode == 1) { // ColorByAge double agesec = time_now- PP[Ip].Release_time; ColorClass = (agesec/MM[0].age_class) ; //% NumColors; if (ColorClass>NumColors) ColorClass=NumColors; } else if (MM[0].color_mode == 2) { // ColorByPulse double agesec = PP[Ip].Release_time - MM[0].time_init; //ColorClass = floor(agesec/(MM[0].pulse_spacing)) ; //% NumColors; ColorClass = agesec/(10.*MM[0].pulse_spacing) ; //% NumColors; while (ColorClass > NumColors) ColorClass-=NumColors; //% NumColors; } else if (MM[0].color_mode == 3) {// ColorByDepth // with Zeta depth can be positive. Make sure code is greater than 1.0 , // 1.0 -> 0.0 interval should be first colorclass. ColorClass = max(1.0-(PP[Ip].z_present/MM[0].depthcolorinterval),0.0); if (ColorClass>NumColors) ColorClass=NumColors; } else if (MM[0].color_mode == 4) {// ColorByOrigin // Really just color by Latitude of XYZstart with offset and scaling in meters ColorClass = (PP[Ip].XYZstart[1]+315000.)/10000.; while (ColorClass < 0.0) ColorClass+=10000.; //% NumColors; while (ColorClass > NumColors) ColorClass-=NumColors; //% NumColors; } else if (MM[0].color_mode == 5) {// ColorBySigma // Sigma values expanded out to 1 : 6. to give 1. 1.5 2. 2.5 etc.. ColorClass = (PP[Ip].Sigma)*NumColors; if (ColorClass < 0.0) ColorClass=0.; //% NumColors; if (ColorClass > NumColors) ColorClass=NumColors; //% NumColors; } // Add 2 to ColorClass so that cases for 0 and 1 are accommodated pos[Ipx] = make_float4(PP[Ip].x_present,PP[Ip].y_present,PP[Ip].z_present, ColorClass+2. 
); } } // end of move() if ( cudaindex==0) DD[0].time_now = time_now; // Only update dev_DD[] once // Hopefully the other cudas have gotten started by now and don't need to read dev_DD[0].time_now } ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// // update which triangle the iP is in for the three meshes iMM 012 UVW // note the iMM=2 also gives iele and factors for ANGLE and depth // Sets PP[Ip].i_ele4[iMM] and the factors PP[Ip].factor4[iMM][0:2] // for (in iMM=0; iMM<3; iMM++) __device__ void findiele(int Ip,int iMM,struct PPart *PP, struct MMesh *MM) { int i_ele, keepgoing, k; float xpart, ypart; float smallest_value = -0.05000; // -.01 -0.001; // Find surrounding triangle of Particle i_ele = PP[Ip].i_ele4[iMM]; xpart = PP[Ip].x_present; ypart = PP[Ip].y_present; //return; //if(Ip==0) printf(" start findiele i_ele=%d \n",i_ele); // Check for out of domain/ grounded particle // do work if in-domain else increment igrounded and skip main part of move if (i_ele >= 0 && PP[Ip].state==1) { keepgoing = 1; while (keepgoing > 0 ){ // if any of the f's are negative, walk that way and restart while loop k=0; PP[Ip].factor4[iMM][k]=MM[iMM].a_frac[i_ele][k]*xpart + MM[iMM].b_frac[i_ele][k]*ypart + MM[iMM].c_frac[i_ele][k]; if ( PP[Ip].factor4[iMM][k] < smallest_value) { i_ele = MM[iMM].tri_connect[i_ele][0]; } else { k=1; PP[Ip].factor4[iMM][k]=MM[iMM].a_frac[i_ele][k]*xpart + MM[iMM].b_frac[i_ele][k]*ypart + MM[iMM].c_frac[i_ele][k]; if ( PP[Ip].factor4[iMM][k] < smallest_value ) { i_ele = MM[iMM].tri_connect[i_ele][1] ; } else { k=2; PP[Ip].factor4[iMM][k]=MM[iMM].a_frac[i_ele][k]*xpart + MM[iMM].b_frac[i_ele][k]*ypart + MM[iMM].c_frac[i_ele][k]; if ( PP[Ip].factor4[iMM][k] < smallest_value ) { i_ele = MM[iMM].tri_connect[i_ele][2] ; } else { // Found it, iele, all f's are positive keepgoing = 0; } } } if (i_ele < 0) { // newly grounded particle, zero him out. PP[Ip].state = 2; // set state = grounded PP[Ip].factor4[iMM][0]=0.0; PP[Ip].factor4[iMM][1]=0.0; PP[Ip].factor4[iMM][2]=0.0; PP[Ip].i_ele4[iMM] = i_ele; keepgoing = 0; } if (keepgoing>0) keepgoing++; if (keepgoing > 7000) { printf(" k%d ",Ip); PP[Ip].state = 2; // set state = grounded i_ele=-1; PP[Ip].i_ele4[iMM] = -1; PP[Ip].x_present=0.0; PP[Ip].y_present=0.0; PP[Ip].z_present=0.0; keepgoing=0;} } //return; if (i_ele>=0){ // good particle still in the mesh PP[Ip].i_ele4[iMM]=i_ele;} // end of while keepgoing // did it finish in a good element? if not !good ground it. 
// if (MM[iMM].goodele[i_ele]) PP[Ip].state = 2; } return; } ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// // find 2d interpolated ANGLE(icase==0), depth(icase==1) // input is X,Y, A of i_ele points along with factor4 // MM[iMM].X[i_ele4[0:2]] MM[iMM].Y[i_ele4[0:2]] MM[iMM].ANGLE[i_ele4[0:2]] // PP[Ip].factor4[iMM][i_ele[0:]] // 2Dinterpolate(Ip,iMM,PP,MM,icase); // icase = 0U, 1V, 2W, 3ANGLE, 4depth // maybe do VAR[3] = MM[iMM].ANGLE[i_ele4[0:2]] instead of icase // That way we can feed it the vertical interpolates of UVW[3] //float VAR[3]; //iMM=3; for (int i=0; i<3; i++) VAR[i]=MM[iMM].angle[PP[Ip].iele4[iMM][i]]; //float angle = 2Dinterpolate(Ip,iMM,PP,MM,VAR); //iMM=4; for (int i=0; i<3; i++) VAR[i]=MM[iMM].depth[PP[Ip].iele4[iMM][i]]; //float depth = 2Dinterpolate(Ip,iMM,PP,MM,VAR); __device__ void Interpolate2D(int Ip, int iMM, struct PPart *PP, float *VAR) { float factor0=PP[Ip].factor4[iMM][0]; float factor1=PP[Ip].factor4[iMM][1]; float factor2=PP[Ip].factor4[iMM][2]; PP[Ip].answer[0] = factor0*VAR[0]+factor1*VAR[1]+factor2*VAR[2]; } ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////// __device__ void Interpolatesigma(int Ip, int iMM, struct PPart *PP, struct DData *DD, struct MMesh *MM, float depth, float ZETA, float time_now ) { // Find the isigmap, isigmam and sigmafrac // U[iP] = U[isigmap]*sigmafrac +U[isigmam]*(1.0-sigmafrac) // do three times and use timefrac to produce final VAR[3] at corners // iMM = 0U 1V 2W // Only works for UVW. In future add special cases 3T 4S iMM=iMM-1 or -2 int DDT0, DDT2; int IpTest = -250; int i_ele = PP[Ip].i_ele4[iMM]; float vart[3]; float var[3]; int sp, sm; float sigIp = max( min(PP[Ip].z_present / (depth-ZETA) , -0.01) , -0.99); // 0 to -1.0 //float sigIp = PP[Ip].z_present / depth ; // 0 to -1.0 // count up in sp to walk down in depth sp=1; while(MM[iMM].sigma[sp]< sigIp) sp++; // increment if sp is still above sigIp sm = sp-1; // sp is below sigIp, sm is above float sigfrac = (sigIp-MM[iMM].sigma[sp])/(MM[iMM].sigma[sm]- MM[iMM].sigma[sp]); // Pick out the three DD's to interpolate in time int DD3[3]; DD3[0]=DD[0].DD3[0]; DD3[1]=DD[0].DD3[1]; DD3[2]=DD[0].DD3[2]; int DDT0=DD3[0]; //DDT1=DD3[1]; int DDT2=DD3[2]; if (Ip==IpTest ) printf(" start of interpretsigma iMM=%d z_present= %g /depth=%g =sigIP = %g \n sm,sp sigma[%d]=%g sigma[%d]=%g sigIP %g sigfrac %g\n" ,iMM,PP[Ip].z_present, depth,sigIp,sm,MM[iMM].sigma[sm],sp,MM[iMM].sigma[sp],sigIp,sigfrac); // loop on time DD3[i] // loop on three corners ele[i_ele][j] // average sm and sp at the corner for (int it=0; it<3; it++){ // time loop for DD3[it] for (int j=0; j<3; j++){ // loop around corners to get sigma averaged variable long ele0=MM[iMM].ele[i_ele][j]; if (iMM==0){ // U var[j] = DD[DD3[it]].U[sm][ele0]*sigfrac + DD[DD3[it]].U[sp][ele0]* (1.0 - sigfrac); } else if (iMM==1){ // V var[j] = DD[DD3[it]].V[sm][ele0]*sigfrac + DD[DD3[it]].V[sp][ele0]* (1.0 - sigfrac); } else if (iMM==2){ // W var[j] = DD[DD3[it]].W[sm][ele0]*sigfrac + DD[DD3[it]].W[sp][ele0]* (1.0 - sigfrac); } else { printf(" \n\n Bad iMM in Interpolatesigma %d\n\n",iMM); } } // Have sigma average var[0:2] at the three corners //interpolate to center, to get three time increments vart[0:2] vart[it]= PP[Ip].factor4[iMM][0]*var[0] + 
PP[Ip].factor4[iMM][1]*var[1] + PP[Ip].factor4[iMM][2]*var[2]; if (Ip==IpTest ) printf(" intersig DD3=%d var=%g %g %g vart=%g\n " ,DD3[it],var[0],var[1],var[2],vart[it]); } // Finally interpolate in time to get final answer for U, V, W to mover PP[Ip] // float time_now = DD[0].time_now; // Will use dev_DD after the first pass with new DD float time_frac=(time_now - DD[DDT0].time)/(DD[DDT2].time - DD[DDT0].time); //float a = 2.*vart[2] -4.*vart[1] +2.*vart[0]; //float b = - vart[2] +4.*vart[1] -3.*vart[0]; //float c = vart[0]; //float Upnow = a*time_frac*time_frac + b*time_frac + c; float Upnow = ( 2.*vart[2] -4.*vart[1] +2.*vart[0])*time_frac*time_frac +(- vart[2] +4.*vart[1] -3.*vart[0])*time_frac +( vart[0]); /* Now have time sigma and space interpolates of U,V,W for particle */ PP[Ip].answer[0] = Upnow; if (Ip==IpTest ) printf(" intersigend timenow=%fs timefrac=%g Upnow=%g\n ",time_now,time_frac,Upnow); }
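The time interpolation at the end of Interpolatesigma (and the ZETA blend in move3d) fits a quadratic through the three buffered DD time levels, assuming the levels are equally spaced (hourly files) so the middle one sits at time_frac = 0.5. The following standalone host-side sketch is not part of the original file; the sample values are made up, and it only reproduces and checks that formula.

// Standalone check of the 3-point quadratic time interpolation used above.
// Assumes samples v0, v1, v2 sit at time_frac = 0.0, 0.5, 1.0 (equally spaced DD levels).
#include <stdio.h>

static float quad_time_interp(float v0, float v1, float v2, float time_frac) {
    float a =  2.f*v2 - 4.f*v1 + 2.f*v0;   // same coefficients as in Interpolatesigma
    float b = -v2     + 4.f*v1 - 3.f*v0;
    float c =  v0;
    return a*time_frac*time_frac + b*time_frac + c;
}

int main(void) {
    float v0 = 0.10f, v1 = 0.25f, v2 = 0.20f;   // hypothetical U values at the three DD time levels
    printf("t=0.0 -> %g (expect %g)\n", quad_time_interp(v0, v1, v2, 0.0f), v0);
    printf("t=0.5 -> %g (expect %g)\n", quad_time_interp(v0, v1, v2, 0.5f), v1);
    printf("t=1.0 -> %g (expect %g)\n", quad_time_interp(v0, v1, v2, 1.0f), v2);
    return 0;
}

The polynomial passes exactly through the three sample values at time_frac = 0, 0.5 and 1, which is why the particle mover can step smoothly between the hourly velocity fields.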
12c82c790ee349f4da1a45a67046ba97ea6728de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef SDK_CROSS_DISSOLVE #define SDK_CROSS_DISSOLVE #if __CUDACC_VER_MAJOR__ >= 9 #include <hip/hip_fp16.h> #endif #include "PrGPU/KernelSupport/KernelCore.h" //includes KernelWrapper.h #include "PrGPU/KernelSupport/KernelMemory.h" #if GF_DEVICE_TARGET_DEVICE GF_KERNEL_FUNCTION(kCrossDissolveCUDA, ((GF_PTR(float4 const))(outImg)) ((GF_PTR(float4 const))(inImg)) ((GF_PTR(float4))(destImg)), ((unsigned int)(outPitch)) ((unsigned int)(inPitch)) ((unsigned int)(destPitch)) ((int)(in16f)) ((unsigned int)(inWidth)) ((unsigned int)(inHeight)) ((float)(inProgress)) ((int)(inFlip)), ((uint2)(inXY)(KERNEL_XY))) { float4 outgoing, incoming, dest; if ( inXY.x >= inWidth || inXY.y >= inHeight ) return; outgoing = ReadFloat4(outImg, inXY.y * outPitch + inXY.x, !!in16f); incoming = ReadFloat4(inImg, inXY.y * inPitch + inXY.x, !!in16f); float outgoingAlphaWeighted = outgoing.w * (1.0f - inProgress); float incomingAlphaWeighted = incoming.w * inProgress; float newAlpha = outgoingAlphaWeighted + incomingAlphaWeighted ; float recipNewAlpha = newAlpha != 0.0f ? 1.0f / newAlpha : 0.0f; dest.x = (outgoing.x * outgoingAlphaWeighted + incoming.x * incomingAlphaWeighted) * recipNewAlpha; dest.y = (outgoing.y * outgoingAlphaWeighted + incoming.y * incomingAlphaWeighted) * recipNewAlpha; dest.z = (outgoing.z * outgoingAlphaWeighted + incoming.z * incomingAlphaWeighted) * recipNewAlpha; dest.w = newAlpha; WriteFloat4(dest, destImg, inXY.y * outPitch + inXY.x, !!in16f); } #endif #if __NVCC__ void CrossDissolve_CUDA ( float const *outBuf, float const *inBuf, float *destBuf, unsigned int outPitch, unsigned int inPitch, unsigned int destPitch, int is16f, unsigned int width, unsigned int height, float progress, int flip ) { dim3 blockDim (16, 16, 1); dim3 gridDim ( (width + blockDim.x - 1)/ blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1 ); hipLaunchKernelGGL(( kCrossDissolveCUDA) , dim3(gridDim), dim3(blockDim), 0 , 0, (float4 const*) outBuf, (float4 const*) inBuf, (float4*) destBuf, outPitch, inPitch, destPitch, is16f, width, height, progress, flip ); hipDeviceSynchronize(); } #endif //GF_DEVICE_TARGET_HOST #endif //SDK_CROSS_DISSOLVE
12c82c790ee349f4da1a45a67046ba97ea6728de.cu
#ifndef SDK_CROSS_DISSOLVE #define SDK_CROSS_DISSOLVE #if __CUDACC_VER_MAJOR__ >= 9 #include <cuda_fp16.h> #endif #include "PrGPU/KernelSupport/KernelCore.h" //includes KernelWrapper.h #include "PrGPU/KernelSupport/KernelMemory.h" #if GF_DEVICE_TARGET_DEVICE GF_KERNEL_FUNCTION(kCrossDissolveCUDA, ((GF_PTR(float4 const))(outImg)) ((GF_PTR(float4 const))(inImg)) ((GF_PTR(float4))(destImg)), ((unsigned int)(outPitch)) ((unsigned int)(inPitch)) ((unsigned int)(destPitch)) ((int)(in16f)) ((unsigned int)(inWidth)) ((unsigned int)(inHeight)) ((float)(inProgress)) ((int)(inFlip)), ((uint2)(inXY)(KERNEL_XY))) { float4 outgoing, incoming, dest; if ( inXY.x >= inWidth || inXY.y >= inHeight ) return; outgoing = ReadFloat4(outImg, inXY.y * outPitch + inXY.x, !!in16f); incoming = ReadFloat4(inImg, inXY.y * inPitch + inXY.x, !!in16f); float outgoingAlphaWeighted = outgoing.w * (1.0f - inProgress); float incomingAlphaWeighted = incoming.w * inProgress; float newAlpha = outgoingAlphaWeighted + incomingAlphaWeighted ; float recipNewAlpha = newAlpha != 0.0f ? 1.0f / newAlpha : 0.0f; dest.x = (outgoing.x * outgoingAlphaWeighted + incoming.x * incomingAlphaWeighted) * recipNewAlpha; dest.y = (outgoing.y * outgoingAlphaWeighted + incoming.y * incomingAlphaWeighted) * recipNewAlpha; dest.z = (outgoing.z * outgoingAlphaWeighted + incoming.z * incomingAlphaWeighted) * recipNewAlpha; dest.w = newAlpha; WriteFloat4(dest, destImg, inXY.y * outPitch + inXY.x, !!in16f); } #endif #if __NVCC__ void CrossDissolve_CUDA ( float const *outBuf, float const *inBuf, float *destBuf, unsigned int outPitch, unsigned int inPitch, unsigned int destPitch, int is16f, unsigned int width, unsigned int height, float progress, int flip ) { dim3 blockDim (16, 16, 1); dim3 gridDim ( (width + blockDim.x - 1)/ blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1 ); kCrossDissolveCUDA <<< gridDim, blockDim, 0 >>> ( (float4 const*) outBuf, (float4 const*) inBuf, (float4*) destBuf, outPitch, inPitch, destPitch, is16f, width, height, progress, flip ); cudaDeviceSynchronize(); } #endif //GF_DEVICE_TARGET_HOST #endif //SDK_CROSS_DISSOLVE
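kCrossDissolveCUDA blends the outgoing and incoming pixels with alpha-weighted coefficients and renormalizes by the combined alpha. A minimal single-pixel CPU sketch of that arithmetic follows; it is not part of the SDK sample, and the pixel values are made up for illustration.

// Single-pixel reference of the alpha-weighted dissolve in kCrossDissolveCUDA.
#include <stdio.h>

struct Pixel { float r, g, b, a; };

static Pixel cross_dissolve(Pixel outgoing, Pixel incoming, float progress) {
    float wOut = outgoing.a * (1.0f - progress);   // outgoing weight fades out
    float wIn  = incoming.a * progress;            // incoming weight fades in
    float newAlpha = wOut + wIn;
    float recip = (newAlpha != 0.0f) ? 1.0f / newAlpha : 0.0f;
    Pixel dest;
    dest.r = (outgoing.r * wOut + incoming.r * wIn) * recip;
    dest.g = (outgoing.g * wOut + incoming.g * wIn) * recip;
    dest.b = (outgoing.b * wOut + incoming.b * wIn) * recip;
    dest.a = newAlpha;
    return dest;
}

int main(void) {
    Pixel a = {1.0f, 0.0f, 0.0f, 1.0f};   // opaque red
    Pixel b = {0.0f, 0.0f, 1.0f, 0.5f};   // half-transparent blue
    Pixel d = cross_dissolve(a, b, 0.25f);
    printf("dest = %g %g %g %g\n", d.r, d.g, d.b, d.a);
    return 0;
}

Dividing by the combined alpha keeps the color un-premultiplied while the alpha itself carries the blend, matching the kernel's recipNewAlpha guard against a zero denominator.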
60cf56f88df3a23aa9320910a7beb92299423978.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <opencv2/opencv.hpp> #include <stdlib.h> #include <time.h> #include <stdint.h> #include <inttypes.h> #define MAX_BLOCK_SIZE 32 #define MAX_WINDOW_SIZE 55 #define MAX_DISP 1000 #define NCHANS 3 #define BLOCK_SIZE 16 // timing utility struct timespec check_timer(const char* str, struct timespec* ts){ struct timespec oldtime; // copy old time over oldtime.tv_nsec = ts->tv_nsec; oldtime.tv_sec = ts->tv_sec; // update ts clock_gettime(CLOCK_REALTIME, ts); // print old time int diffsec; int diffnsec; if(str != NULL){ diffsec = ts->tv_sec - oldtime.tv_sec; diffnsec = ts->tv_nsec - oldtime.tv_nsec; // correct the values if we measured over an integer second break: if(diffnsec < 0){ diffsec--; diffnsec += 1000000000; } printf("%s:%ds %fms\n",str,diffsec,diffnsec/1e6); } return (struct timespec) {diffsec, diffnsec}; } // little bitty kernel to initialize blocks of device memory __global__ void gpu_memset(unsigned char* start, unsigned char value, int length){ int tx = threadIdx.x; int bx = blockIdx.x; int gx = bx*blockDim.x + tx; if(gx < length){ start[gx] = value; } } // teeny little helper function void gpu_perror(char* input){ printf("%s: %s\n", input, hipGetErrorString(hipGetLastError())); } // Device code __global__ void asw_kernel(unsigned char* global_left, unsigned char* global_right, unsigned char* output, unsigned char* debug, int nrows, int ncols, int nchans, int ndisp, int win_size, int win_rad, float s_sigma, float c_sigma) { extern __shared__ unsigned char ref[]; // contains both left and right image data // get the size of the sub-images that we are considering // reference window int ref_width_bytes = (2*win_rad+blockDim.x)*NCHANS*sizeof(unsigned char); // int ref_rows = (2*win_rad+blockDim.y); // target window int tgt_width_bytes = (ndisp+2*win_rad+blockDim.x)*NCHANS*sizeof(unsigned char); // int tgt_rows = (2*win_rad+blockDim.y); unsigned char* tgt = (unsigned char*)(&ref[ ref_width_bytes*(2*win_rad+blockDim.y) ]); // tgt image, reference to somwhere of shared allocated memory float ref_c_factor; float tgt_c_factor; float s_factor; float ref_c2p_diff; float tgt_c2p_diff; float ref2tgt_diff; // variables for keeping track of the output float weight; float cost; float min_cost; unsigned char min_cost_index; unsigned char ref_center_pix[3]; // unsigned char tgt_center_pix[3]; unsigned char ref_pix[3]; unsigned char tgt_pix[3]; int disp; int win_x; int win_y; int dx; int tgt_x; // get identity of this thread (changing these to #define's) #define tx (threadIdx.x) #define ty (threadIdx.y) #define bx (blockIdx.x + 5) #define by (blockIdx.y + 1) #define gx (bx*blockDim.x + tx) #define gy (by*blockDim.y + ty) // setup LUTs // nevermind... 
right now there are none // copy relevant subimages to shared memory // TODO: additional boundary checks on this data // TODO: better division technique // TODO: investigate where syncthreads() needs to be called for best performance // starting with reference image: (4 deleted register variables) // int xblocks = (ref_width_bytes / blockDim.x + 1); // int yblocks = ((2*win_rad+blockDim.y) / blockDim.y + 1); // int xstart = ((bx*blockDim.x - win_rad)*NCHANS); // int ystart = (gy - win_rad); for(win_x = 0; win_x < (ref_width_bytes / blockDim.x + 1); win_x++){ // int x_idx = (win_x*blockDim.x + tx); // int g_x_idx = (((bx*blockDim.x - win_rad)*NCHANS) + win_x*blockDim.x + tx); if((win_x*blockDim.x + tx) < ref_width_bytes){ for(win_y = 0; win_y < ((2*win_rad+blockDim.y) / blockDim.y + 1); win_y++){ // int y_idx = (win_y*blockDim.y + ty); // int g_y_idx = ((gy - win_rad) + win_y*blockDim.y); if((win_y*blockDim.y + ty) < (2*win_rad+blockDim.y)){ // copy bytes (not pixels) from global_left into reference image ref[(win_y*blockDim.y + ty)*ref_width_bytes + (win_x*blockDim.x + tx)] = global_left[((gy - win_rad) + win_y*blockDim.y)*ncols*NCHANS + (((bx*blockDim.x - win_rad)*NCHANS) + win_x*blockDim.x + tx)]; // copy into the debug image (only made to work with a single block of threads) // debug[((gy - win_rad) + win_y*blockDim.y)*ncols*NCHANS + (((bx*blockDim.x - win_rad)*NCHANS) + win_x*blockDim.x + tx)] = ref[(win_y*blockDim.y + ty)*ref_width_bytes + (win_x*blockDim.x + tx)]; } } } } // then to the target image: (4 deleted register variables) // xblocks = (tgt_width_bytes / blockDim.x + 1); // yblocks = ((2*win_rad+blockDim.y) / blockDim.y + 1); // xstart = ((bx*blockDim.x - win_rad - ndisp)*NCHANS); // ystart = (gy - win_rad); for(win_x = 0; win_x < (tgt_width_bytes / blockDim.x + 1); win_x++){ // int x_idx = (win_x*blockDim.x + tx); // int g_x_idx = (((bx*blockDim.x - win_rad - ndisp)*NCHANS) + win_x*blockDim.x + tx); if((win_x*blockDim.x + tx) < tgt_width_bytes){ for(win_y = 0; win_y < ((2*win_rad+blockDim.y) / blockDim.y + 1); win_y++){ // int y_idx = (win_y*blockDim.y + ty); // int g_y_idx = ((gy - win_rad) + win_y*blockDim.y); if((win_y*blockDim.y + ty) < (2*win_rad+blockDim.y)){ // copy bytes (not pixels) from global_left into reference image tgt[(win_y*blockDim.y + ty)*tgt_width_bytes + (win_x*blockDim.x + tx)] = global_right[((gy - win_rad) + win_y*blockDim.y)*ncols*NCHANS + (((bx*blockDim.x - win_rad - ndisp)*NCHANS) + win_x*blockDim.x + tx)]; // copy into the debug image (only made to work with a single block of threads) // debug[((gy - win_rad) + win_y*blockDim.y)*ncols*NCHANS + (((bx*blockDim.x - win_rad - ndisp)*NCHANS) + win_x*blockDim.x + tx)] = tgt[(win_y*blockDim.y + ty)*tgt_width_bytes + (win_x*blockDim.x + tx)]; } } } } __syncthreads(); // get a pointer to the ref_center_pix, which is constant for any given thread ref_center_pix[0] = ref[(win_rad + ty)*ref_width_bytes + (win_rad + tx)*NCHANS + 0]; ref_center_pix[1] = ref[(win_rad + ty)*ref_width_bytes + (win_rad + tx)*NCHANS + 1]; ref_center_pix[2] = ref[(win_rad + ty)*ref_width_bytes + (win_rad + tx)*NCHANS + 2]; // initialize min_cost to some arbitrarily large value min_cost = 1e12; // initialize min_cost_index to 0 min_cost_index = 0; // for each value of ndisp for(disp = 0; disp < ndisp; disp++){ // get a pointer to the tgt_center_pix, which is constant for each disp // ... 
except I get better results by using ref_center_pix to compare to tgt_pix // tgt_center_pix[0] = tgt[(win_rad + ty)*tgt_width_bytes + (ndisp + win_rad + tx - disp)*NCHANS + 0]; // tgt_center_pix[1] = tgt[(win_rad + ty)*tgt_width_bytes + (ndisp + win_rad + tx - disp)*NCHANS + 1]; // tgt_center_pix[2] = tgt[(win_rad + ty)*tgt_width_bytes + (ndisp + win_rad + tx - disp)*NCHANS + 2]; // reset weight and cost weight = 0; cost = 0; // in each row in the window: for(win_x = 0; win_x < win_size; win_x++){ // locate the pixel in the ref image (deleted this var) dx = win_x + tx; // locate the pixel in the tgt image (deleted this var) tgt_x = ndisp + win_x + tx - disp; // find the window-center to pixel x-distance (deleted this var) // int dx = win_x - win_rad; // in each column of the window: for(win_y = 0; win_y < win_size; win_y++){ // locate the pixel in the ref image (deleted this var) // int ref_y = win_y + ty; // find the window-center to pixel y-distance (deleted this var) // int dy = win_y - win_rad; // get the radius^2 value (deleted this var) // float radius_2 = (win_x-win_rad)*(win_x-win_rad) + (win_y-win_rad)*(win_y-win_rad); // get the s_factor for this particular window location s_factor = __expf(-((win_x-win_rad)*(win_x-win_rad) + (win_y-win_rad)*(win_y-win_rad))/(2.*s_sigma*s_sigma)); // store tgt and ref pixels in register memory ref_pix[0] = ref[(win_y+ty)*ref_width_bytes + (dx)*NCHANS + 0]; ref_pix[1] = ref[(win_y+ty)*ref_width_bytes + (dx)*NCHANS + 1]; ref_pix[2] = ref[(win_y+ty)*ref_width_bytes + (dx)*NCHANS + 2]; tgt_pix[0] = tgt[(win_y+ty)*tgt_width_bytes + (tgt_x)*NCHANS + 0]; tgt_pix[1] = tgt[(win_y+ty)*tgt_width_bytes + (tgt_x)*NCHANS + 1]; tgt_pix[2] = tgt[(win_y+ty)*tgt_width_bytes + (tgt_x)*NCHANS + 2]; // get the center-to-pixel and overall color differences (organized together for IDP) ref_c2p_diff = abs(ref_center_pix[0] - ref_pix[0]); tgt_c2p_diff = abs(ref_center_pix[0] - tgt_pix[0]); ref2tgt_diff = abs(ref_pix[0] - tgt_pix[0]); ref_c2p_diff += abs(ref_center_pix[1] - ref_pix[1]); tgt_c2p_diff += abs(ref_center_pix[1] - tgt_pix[1]); ref2tgt_diff+= abs(ref_pix[1] - tgt_pix[1]); ref_c2p_diff += abs(ref_center_pix[2] - ref_pix[2]); tgt_c2p_diff += abs(ref_center_pix[2] - tgt_pix[2]); ref2tgt_diff+= abs(ref_pix[2] - tgt_pix[2]); // get the c_factors ref_c_factor = __expf(-ref_c2p_diff*ref_c2p_diff/(2.*c_sigma*c_sigma)); tgt_c_factor = __expf(-tgt_c2p_diff*tgt_c2p_diff/(2.*c_sigma*c_sigma)); // calulate the pix_weight (this variable has been done away with to increase ILP) // pix_weight = s_factor*ref_c_factor*tgt_c_factor; // add in the cost cost += s_factor*ref_c_factor*tgt_c_factor*ref2tgt_diff; // add in the weight weight += s_factor*ref_c_factor*tgt_c_factor; } } // now that the window is done, compare this cost (after normalizing) to min_cost if( min_cost > cost / weight){ min_cost = cost / weight; min_cost_index = disp; } __syncthreads(); } // set the output to the index of min_cost output[gy*ncols + gx] = min_cost_index; } int asw(cv::Mat im_l, cv::Mat im_r, int ndisp, int s_sigma, int c_sigma){ // window size and win_rad int win_rad = 1.5*s_sigma; int win_size = 2*win_rad+1; // declare timer struct timespec ts; // check that images are matching dimensions if(im_l.rows != im_r.rows){ printf("Error: im_l and im_r do not have matching row count\n"); return 1; } if(im_l.cols != im_r.cols){ printf("Error: im_l and im_r do not have matching col count\n"); return 1; } if(im_l.channels() != im_r.channels()){ printf("Error: im_l and im_r do not have matching channel 
count\n"); return 1; } // set easy-access variables for number of rows, cols, and chans int nrows = im_l.rows; int ncols = im_l.cols; int nchans = im_l.channels(); // initialize the device input arrays unsigned char* d_im_l; hipMalloc(&d_im_l,nchans*nrows*ncols*sizeof(unsigned char)); unsigned char* d_im_r; hipMalloc(&d_im_r,nchans*nrows*ncols*sizeof(unsigned char)); // initialize the output data matrix unsigned char* out = (unsigned char*)malloc(nrows*ncols*sizeof(unsigned char)); unsigned char* d_out; hipMalloc(&d_out,nrows*ncols*sizeof(unsigned char)); unsigned char* debug = (unsigned char*)malloc(nrows*ncols*nchans*sizeof(unsigned char)); unsigned char* d_debug; hipMalloc(&d_debug,nchans*nrows*ncols*sizeof(unsigned char)); // define a shortcut to the host data arrays unsigned char* data_l = ((unsigned char*)(im_l.data)); unsigned char* data_r = ((unsigned char*)(im_r.data)); // initialize the outputs (otherwise changes persist between runtimes, hard to debug): int tpb = 1024; int bpg = nrows*ncols*sizeof(unsigned char) / tpb + 1; hipLaunchKernelGGL(( gpu_memset), dim3(bpg), dim3(tpb), 0, 0, d_out,25,nrows*ncols*sizeof(unsigned char)); // gpu_perror("memset1"); hipLaunchKernelGGL(( gpu_memset), dim3(nchans*bpg), dim3(tpb), 0, 0, d_debug,25,nchans*nrows*ncols*sizeof(unsigned char)); // gpu_perror("memset2"); // check some values before calling the asw_kernel size_t reference_window_size = (2*win_rad+BLOCK_SIZE)*(2*win_rad+BLOCK_SIZE)*sizeof(unsigned char)*nchans; size_t target_window_size = (2*win_rad+ndisp+BLOCK_SIZE)*(BLOCK_SIZE+2*win_rad)*sizeof(unsigned char)*nchans; size_t shared_size = target_window_size+reference_window_size; if(shared_size > 47000){ printf("FATAL ERROR: shared_size for asw_kernel exceeds the device limit (48 kB), exiting\n"); return 1; } //copy the host input data to the device hipMemcpy(d_im_l, data_l, nchans*nrows*ncols*sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(d_im_r, data_r, nchans*nrows*ncols*sizeof(unsigned char), hipMemcpyHostToDevice); // start the timer check_timer(NULL, &ts); // call the asw_kernel dim3 blocksPerGrid(22,21); dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE); // __global__ void asw_kernel(unsigned char* global_left, unsigned char* global_right, unsigned char* output, unsigned char* debug, // int nrows, int ncols, int nchans, int ndisp, int win_size, int win_rad, float s_sigma, float c_sigma) hipLaunchKernelGGL(( asw_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), shared_size, 0, d_im_l, d_im_r, d_out, d_debug, nrows, ncols, nchans, ndisp, win_size, win_rad, s_sigma, c_sigma); hipDeviceSynchronize(); check_timer("gpu_asw", &ts); // gpu_perror("asw_kernel"); // copy the device output data to the host hipMemcpy(out, d_out, nrows*ncols*sizeof(unsigned char), hipMemcpyDeviceToHost); hipMemcpy(debug, d_debug, nrows*ncols*nchans*sizeof(unsigned char), hipMemcpyDeviceToHost); // make an image and view it: cv::Mat im_out(nrows,ncols,CV_8UC1,out); cv::Mat im_debug(nrows,ncols,CV_8UC3,debug); cv::imwrite("out/gpu_asw.png",im_out*255/ndisp); //cv::imshow("window",im_out*255/ndisp); //cv::waitKey(0); // cleanup memory hipFree(d_im_l); hipFree(d_im_r); hipFree(d_out); hipFree(d_debug); free(out); free(debug); return 0; } int main(int argc, char** argv){ // spacial and color sigmas int s_sigma = 5; int c_sigma = 50; // number of disparities to check int ndisp = 64; // input images cv::Mat im_l = cv::imread("l.png"); cv::Mat im_r = cv::imread("r.png"); return asw(im_l, im_r, ndisp, s_sigma, c_sigma); }
60cf56f88df3a23aa9320910a7beb92299423978.cu
#include <stdio.h> #include <opencv2/opencv.hpp> #include <stdlib.h> #include <time.h> #include <stdint.h> #include <inttypes.h> #define MAX_BLOCK_SIZE 32 #define MAX_WINDOW_SIZE 55 #define MAX_DISP 1000 #define NCHANS 3 #define BLOCK_SIZE 16 // timing utility struct timespec check_timer(const char* str, struct timespec* ts){ struct timespec oldtime; // copy old time over oldtime.tv_nsec = ts->tv_nsec; oldtime.tv_sec = ts->tv_sec; // update ts clock_gettime(CLOCK_REALTIME, ts); // print old time int diffsec; int diffnsec; if(str != NULL){ diffsec = ts->tv_sec - oldtime.tv_sec; diffnsec = ts->tv_nsec - oldtime.tv_nsec; // correct the values if we measured over an integer second break: if(diffnsec < 0){ diffsec--; diffnsec += 1000000000; } printf("%s:%ds %fms\n",str,diffsec,diffnsec/1e6); } return (struct timespec) {diffsec, diffnsec}; } // little bitty kernel to initialize blocks of device memory __global__ void gpu_memset(unsigned char* start, unsigned char value, int length){ int tx = threadIdx.x; int bx = blockIdx.x; int gx = bx*blockDim.x + tx; if(gx < length){ start[gx] = value; } } // teeny little helper function void gpu_perror(char* input){ printf("%s: %s\n", input, cudaGetErrorString(cudaGetLastError())); } // Device code __global__ void asw_kernel(unsigned char* global_left, unsigned char* global_right, unsigned char* output, unsigned char* debug, int nrows, int ncols, int nchans, int ndisp, int win_size, int win_rad, float s_sigma, float c_sigma) { extern __shared__ unsigned char ref[]; // contains both left and right image data // get the size of the sub-images that we are considering // reference window int ref_width_bytes = (2*win_rad+blockDim.x)*NCHANS*sizeof(unsigned char); // int ref_rows = (2*win_rad+blockDim.y); // target window int tgt_width_bytes = (ndisp+2*win_rad+blockDim.x)*NCHANS*sizeof(unsigned char); // int tgt_rows = (2*win_rad+blockDim.y); unsigned char* tgt = (unsigned char*)(&ref[ ref_width_bytes*(2*win_rad+blockDim.y) ]); // tgt image, reference to somwhere of shared allocated memory float ref_c_factor; float tgt_c_factor; float s_factor; float ref_c2p_diff; float tgt_c2p_diff; float ref2tgt_diff; // variables for keeping track of the output float weight; float cost; float min_cost; unsigned char min_cost_index; unsigned char ref_center_pix[3]; // unsigned char tgt_center_pix[3]; unsigned char ref_pix[3]; unsigned char tgt_pix[3]; int disp; int win_x; int win_y; int dx; int tgt_x; // get identity of this thread (changing these to #define's) #define tx (threadIdx.x) #define ty (threadIdx.y) #define bx (blockIdx.x + 5) #define by (blockIdx.y + 1) #define gx (bx*blockDim.x + tx) #define gy (by*blockDim.y + ty) // setup LUTs // nevermind... 
right now there are none // copy relevant subimages to shared memory // TODO: additional boundary checks on this data // TODO: better division technique // TODO: investigate where syncthreads() needs to be called for best performance // starting with reference image: (4 deleted register variables) // int xblocks = (ref_width_bytes / blockDim.x + 1); // int yblocks = ((2*win_rad+blockDim.y) / blockDim.y + 1); // int xstart = ((bx*blockDim.x - win_rad)*NCHANS); // int ystart = (gy - win_rad); for(win_x = 0; win_x < (ref_width_bytes / blockDim.x + 1); win_x++){ // int x_idx = (win_x*blockDim.x + tx); // int g_x_idx = (((bx*blockDim.x - win_rad)*NCHANS) + win_x*blockDim.x + tx); if((win_x*blockDim.x + tx) < ref_width_bytes){ for(win_y = 0; win_y < ((2*win_rad+blockDim.y) / blockDim.y + 1); win_y++){ // int y_idx = (win_y*blockDim.y + ty); // int g_y_idx = ((gy - win_rad) + win_y*blockDim.y); if((win_y*blockDim.y + ty) < (2*win_rad+blockDim.y)){ // copy bytes (not pixels) from global_left into reference image ref[(win_y*blockDim.y + ty)*ref_width_bytes + (win_x*blockDim.x + tx)] = global_left[((gy - win_rad) + win_y*blockDim.y)*ncols*NCHANS + (((bx*blockDim.x - win_rad)*NCHANS) + win_x*blockDim.x + tx)]; // copy into the debug image (only made to work with a single block of threads) // debug[((gy - win_rad) + win_y*blockDim.y)*ncols*NCHANS + (((bx*blockDim.x - win_rad)*NCHANS) + win_x*blockDim.x + tx)] = ref[(win_y*blockDim.y + ty)*ref_width_bytes + (win_x*blockDim.x + tx)]; } } } } // then to the target image: (4 deleted register variables) // xblocks = (tgt_width_bytes / blockDim.x + 1); // yblocks = ((2*win_rad+blockDim.y) / blockDim.y + 1); // xstart = ((bx*blockDim.x - win_rad - ndisp)*NCHANS); // ystart = (gy - win_rad); for(win_x = 0; win_x < (tgt_width_bytes / blockDim.x + 1); win_x++){ // int x_idx = (win_x*blockDim.x + tx); // int g_x_idx = (((bx*blockDim.x - win_rad - ndisp)*NCHANS) + win_x*blockDim.x + tx); if((win_x*blockDim.x + tx) < tgt_width_bytes){ for(win_y = 0; win_y < ((2*win_rad+blockDim.y) / blockDim.y + 1); win_y++){ // int y_idx = (win_y*blockDim.y + ty); // int g_y_idx = ((gy - win_rad) + win_y*blockDim.y); if((win_y*blockDim.y + ty) < (2*win_rad+blockDim.y)){ // copy bytes (not pixels) from global_left into reference image tgt[(win_y*blockDim.y + ty)*tgt_width_bytes + (win_x*blockDim.x + tx)] = global_right[((gy - win_rad) + win_y*blockDim.y)*ncols*NCHANS + (((bx*blockDim.x - win_rad - ndisp)*NCHANS) + win_x*blockDim.x + tx)]; // copy into the debug image (only made to work with a single block of threads) // debug[((gy - win_rad) + win_y*blockDim.y)*ncols*NCHANS + (((bx*blockDim.x - win_rad - ndisp)*NCHANS) + win_x*blockDim.x + tx)] = tgt[(win_y*blockDim.y + ty)*tgt_width_bytes + (win_x*blockDim.x + tx)]; } } } } __syncthreads(); // get a pointer to the ref_center_pix, which is constant for any given thread ref_center_pix[0] = ref[(win_rad + ty)*ref_width_bytes + (win_rad + tx)*NCHANS + 0]; ref_center_pix[1] = ref[(win_rad + ty)*ref_width_bytes + (win_rad + tx)*NCHANS + 1]; ref_center_pix[2] = ref[(win_rad + ty)*ref_width_bytes + (win_rad + tx)*NCHANS + 2]; // initialize min_cost to some arbitrarily large value min_cost = 1e12; // initialize min_cost_index to 0 min_cost_index = 0; // for each value of ndisp for(disp = 0; disp < ndisp; disp++){ // get a pointer to the tgt_center_pix, which is constant for each disp // ... 
except I get better results by using ref_center_pix to compare to tgt_pix // tgt_center_pix[0] = tgt[(win_rad + ty)*tgt_width_bytes + (ndisp + win_rad + tx - disp)*NCHANS + 0]; // tgt_center_pix[1] = tgt[(win_rad + ty)*tgt_width_bytes + (ndisp + win_rad + tx - disp)*NCHANS + 1]; // tgt_center_pix[2] = tgt[(win_rad + ty)*tgt_width_bytes + (ndisp + win_rad + tx - disp)*NCHANS + 2]; // reset weight and cost weight = 0; cost = 0; // in each row in the window: for(win_x = 0; win_x < win_size; win_x++){ // locate the pixel in the ref image (deleted this var) dx = win_x + tx; // locate the pixel in the tgt image (deleted this var) tgt_x = ndisp + win_x + tx - disp; // find the window-center to pixel x-distance (deleted this var) // int dx = win_x - win_rad; // in each column of the window: for(win_y = 0; win_y < win_size; win_y++){ // locate the pixel in the ref image (deleted this var) // int ref_y = win_y + ty; // find the window-center to pixel y-distance (deleted this var) // int dy = win_y - win_rad; // get the radius^2 value (deleted this var) // float radius_2 = (win_x-win_rad)*(win_x-win_rad) + (win_y-win_rad)*(win_y-win_rad); // get the s_factor for this particular window location s_factor = __expf(-((win_x-win_rad)*(win_x-win_rad) + (win_y-win_rad)*(win_y-win_rad))/(2.*s_sigma*s_sigma)); // store tgt and ref pixels in register memory ref_pix[0] = ref[(win_y+ty)*ref_width_bytes + (dx)*NCHANS + 0]; ref_pix[1] = ref[(win_y+ty)*ref_width_bytes + (dx)*NCHANS + 1]; ref_pix[2] = ref[(win_y+ty)*ref_width_bytes + (dx)*NCHANS + 2]; tgt_pix[0] = tgt[(win_y+ty)*tgt_width_bytes + (tgt_x)*NCHANS + 0]; tgt_pix[1] = tgt[(win_y+ty)*tgt_width_bytes + (tgt_x)*NCHANS + 1]; tgt_pix[2] = tgt[(win_y+ty)*tgt_width_bytes + (tgt_x)*NCHANS + 2]; // get the center-to-pixel and overall color differences (organized together for IDP) ref_c2p_diff = abs(ref_center_pix[0] - ref_pix[0]); tgt_c2p_diff = abs(ref_center_pix[0] - tgt_pix[0]); ref2tgt_diff = abs(ref_pix[0] - tgt_pix[0]); ref_c2p_diff += abs(ref_center_pix[1] - ref_pix[1]); tgt_c2p_diff += abs(ref_center_pix[1] - tgt_pix[1]); ref2tgt_diff+= abs(ref_pix[1] - tgt_pix[1]); ref_c2p_diff += abs(ref_center_pix[2] - ref_pix[2]); tgt_c2p_diff += abs(ref_center_pix[2] - tgt_pix[2]); ref2tgt_diff+= abs(ref_pix[2] - tgt_pix[2]); // get the c_factors ref_c_factor = __expf(-ref_c2p_diff*ref_c2p_diff/(2.*c_sigma*c_sigma)); tgt_c_factor = __expf(-tgt_c2p_diff*tgt_c2p_diff/(2.*c_sigma*c_sigma)); // calulate the pix_weight (this variable has been done away with to increase ILP) // pix_weight = s_factor*ref_c_factor*tgt_c_factor; // add in the cost cost += s_factor*ref_c_factor*tgt_c_factor*ref2tgt_diff; // add in the weight weight += s_factor*ref_c_factor*tgt_c_factor; } } // now that the window is done, compare this cost (after normalizing) to min_cost if( min_cost > cost / weight){ min_cost = cost / weight; min_cost_index = disp; } __syncthreads(); } // set the output to the index of min_cost output[gy*ncols + gx] = min_cost_index; } int asw(cv::Mat im_l, cv::Mat im_r, int ndisp, int s_sigma, int c_sigma){ // window size and win_rad int win_rad = 1.5*s_sigma; int win_size = 2*win_rad+1; // declare timer struct timespec ts; // check that images are matching dimensions if(im_l.rows != im_r.rows){ printf("Error: im_l and im_r do not have matching row count\n"); return 1; } if(im_l.cols != im_r.cols){ printf("Error: im_l and im_r do not have matching col count\n"); return 1; } if(im_l.channels() != im_r.channels()){ printf("Error: im_l and im_r do not have matching channel 
count\n"); return 1; } // set easy-access variables for number of rows, cols, and chans int nrows = im_l.rows; int ncols = im_l.cols; int nchans = im_l.channels(); // initialize the device input arrays unsigned char* d_im_l; cudaMalloc(&d_im_l,nchans*nrows*ncols*sizeof(unsigned char)); unsigned char* d_im_r; cudaMalloc(&d_im_r,nchans*nrows*ncols*sizeof(unsigned char)); // initialize the output data matrix unsigned char* out = (unsigned char*)malloc(nrows*ncols*sizeof(unsigned char)); unsigned char* d_out; cudaMalloc(&d_out,nrows*ncols*sizeof(unsigned char)); unsigned char* debug = (unsigned char*)malloc(nrows*ncols*nchans*sizeof(unsigned char)); unsigned char* d_debug; cudaMalloc(&d_debug,nchans*nrows*ncols*sizeof(unsigned char)); // define a shortcut to the host data arrays unsigned char* data_l = ((unsigned char*)(im_l.data)); unsigned char* data_r = ((unsigned char*)(im_r.data)); // initialize the outputs (otherwise changes persist between runtimes, hard to debug): int tpb = 1024; int bpg = nrows*ncols*sizeof(unsigned char) / tpb + 1; gpu_memset<<<bpg, tpb>>>(d_out,25,nrows*ncols*sizeof(unsigned char)); // gpu_perror("memset1"); gpu_memset<<<nchans*bpg, tpb>>>(d_debug,25,nchans*nrows*ncols*sizeof(unsigned char)); // gpu_perror("memset2"); // check some values before calling the asw_kernel size_t reference_window_size = (2*win_rad+BLOCK_SIZE)*(2*win_rad+BLOCK_SIZE)*sizeof(unsigned char)*nchans; size_t target_window_size = (2*win_rad+ndisp+BLOCK_SIZE)*(BLOCK_SIZE+2*win_rad)*sizeof(unsigned char)*nchans; size_t shared_size = target_window_size+reference_window_size; if(shared_size > 47000){ printf("FATAL ERROR: shared_size for asw_kernel exceeds the device limit (48 kB), exiting\n"); return 1; } //copy the host input data to the device cudaMemcpy(d_im_l, data_l, nchans*nrows*ncols*sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(d_im_r, data_r, nchans*nrows*ncols*sizeof(unsigned char), cudaMemcpyHostToDevice); // start the timer check_timer(NULL, &ts); // call the asw_kernel dim3 blocksPerGrid(22,21); dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE); // __global__ void asw_kernel(unsigned char* global_left, unsigned char* global_right, unsigned char* output, unsigned char* debug, // int nrows, int ncols, int nchans, int ndisp, int win_size, int win_rad, float s_sigma, float c_sigma) asw_kernel<<<blocksPerGrid, threadsPerBlock, shared_size>>>(d_im_l, d_im_r, d_out, d_debug, nrows, ncols, nchans, ndisp, win_size, win_rad, s_sigma, c_sigma); cudaDeviceSynchronize(); check_timer("gpu_asw", &ts); // gpu_perror("asw_kernel"); // copy the device output data to the host cudaMemcpy(out, d_out, nrows*ncols*sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaMemcpy(debug, d_debug, nrows*ncols*nchans*sizeof(unsigned char), cudaMemcpyDeviceToHost); // make an image and view it: cv::Mat im_out(nrows,ncols,CV_8UC1,out); cv::Mat im_debug(nrows,ncols,CV_8UC3,debug); cv::imwrite("out/gpu_asw.png",im_out*255/ndisp); //cv::imshow("window",im_out*255/ndisp); //cv::waitKey(0); // cleanup memory cudaFree(d_im_l); cudaFree(d_im_r); cudaFree(d_out); cudaFree(d_debug); free(out); free(debug); return 0; } int main(int argc, char** argv){ // spacial and color sigmas int s_sigma = 5; int c_sigma = 50; // number of disparities to check int ndisp = 64; // input images cv::Mat im_l = cv::imread("l.png"); cv::Mat im_r = cv::imread("r.png"); return asw(im_l, im_r, ndisp, s_sigma, c_sigma); }
cb4af43e808601f3f886cdc3f766650f7e5606d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <time.h> #include <stdio.h> #define gpuErrchck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; __global__ void gpu_add(int n, int *a_device, int *b_device, int *c_device) { int bsize = blockDim.x; int tid = threadIdx.x; int i = blockIdx.x * bsize + tid; if( i < n ) c_device[i] = a_device[i] + b_device[i]; } void cpu_add(int n, int *a_host, int *b_host, int *c_host) { int i; for(i=0;i<n;++i) c_host[i] = a_host[i] + b_host[i]; } int main() { int N = 100000; int *a_host = new int[N]; int *b_host = new int[N]; int *c_host = new int[N]; int *a_device = NULL; int *b_device = NULL; int *c_device = NULL; gpuErrchck( hipMalloc((void**)&a_device,N*sizeof(int)) ); gpuErrchck( hipMalloc((void**)&b_device,N*sizeof(int)) ); gpuErrchck( hipMalloc((void**)&c_device,N*sizeof(int)) ); int i ; for(i=0;i<N;++i){ a_host[i] = 1; b_host[i] = 2; } clock_t start, end; double cpu_time_used; gpuErrchck( hipMemcpy((void*)a_device, (void*)a_host, N*sizeof(int), hipMemcpyHostToDevice) ); gpuErrchck( hipMemcpy((void*)b_device, (void*)b_host, N*sizeof(int), hipMemcpyHostToDevice) ); gpuErrchck( hipDeviceSynchronize() ); start = clock(); hipLaunchKernelGGL(( gpu_add), dim3(N/1000),dim3(1000) , 0, 0, N,a_device,b_device,c_device); gpuErrchck( hipDeviceSynchronize() ); end = clock(); gpuErrchck( hipMemcpy((void*)c_host, (void*)c_device, N*sizeof(int), hipMemcpyDeviceToHost) ); gpuErrchck( hipDeviceSynchronize() ); cout<<"GPU result: "<<c_host[1]<<endl; cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; cout<<"GPU Time used: "<<cpu_time_used<<endl; start = clock(); cpu_add(N,a_host,b_host,c_host); end = clock(); cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; cout<<"CPU Time used: "<<cpu_time_used<<endl; cout<<"CPU result: "<<c_host[1]<<endl; delete[] a_host, b_host, c_host; hipFree((void*)a_device); hipFree((void*)b_device); hipFree((void*)c_device); return 0; }
cb4af43e808601f3f886cdc3f766650f7e5606d1.cu
#include <iostream> #include <time.h> #include <stdio.h> #define gpuErrchck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; __global__ void gpu_add(int n, int *a_device, int *b_device, int *c_device) { int bsize = blockDim.x; int tid = threadIdx.x; int i = blockIdx.x * bsize + tid; if( i < n ) c_device[i] = a_device[i] + b_device[i]; } void cpu_add(int n, int *a_host, int *b_host, int *c_host) { int i; for(i=0;i<n;++i) c_host[i] = a_host[i] + b_host[i]; } int main() { int N = 100000; int *a_host = new int[N]; int *b_host = new int[N]; int *c_host = new int[N]; int *a_device = NULL; int *b_device = NULL; int *c_device = NULL; gpuErrchck( cudaMalloc((void**)&a_device,N*sizeof(int)) ); gpuErrchck( cudaMalloc((void**)&b_device,N*sizeof(int)) ); gpuErrchck( cudaMalloc((void**)&c_device,N*sizeof(int)) ); int i ; for(i=0;i<N;++i){ a_host[i] = 1; b_host[i] = 2; } clock_t start, end; double cpu_time_used; gpuErrchck( cudaMemcpy((void*)a_device, (void*)a_host, N*sizeof(int), cudaMemcpyHostToDevice) ); gpuErrchck( cudaMemcpy((void*)b_device, (void*)b_host, N*sizeof(int), cudaMemcpyHostToDevice) ); gpuErrchck( cudaDeviceSynchronize() ); start = clock(); gpu_add<<< N/1000,1000 >>>(N,a_device,b_device,c_device); gpuErrchck( cudaDeviceSynchronize() ); end = clock(); gpuErrchck( cudaMemcpy((void*)c_host, (void*)c_device, N*sizeof(int), cudaMemcpyDeviceToHost) ); gpuErrchck( cudaDeviceSynchronize() ); cout<<"GPU result: "<<c_host[1]<<endl; cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; cout<<"GPU Time used: "<<cpu_time_used<<endl; start = clock(); cpu_add(N,a_host,b_host,c_host); end = clock(); cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; cout<<"CPU Time used: "<<cpu_time_used<<endl; cout<<"CPU result: "<<c_host[1]<<endl; delete[] a_host, b_host, c_host; cudaFree((void*)a_device); cudaFree((void*)b_device); cudaFree((void*)c_device); return 0; }
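The vector-add example times the kernel with clock(), i.e. host wall time around the launch/synchronize pair, and launches N/1000 blocks, which only covers every element when N is a multiple of 1000. The sketch below is an alternative, not the original gpu_add program: it uses CUDA events for device-side timing and the usual rounded-up grid size.

// Timing a kernel with CUDA events; grid size rounded up to cover all elements.
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void add1(int n, const int *a, const int *b, int *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main(void) {
    const int n = 100000;
    int *a, *b, *c;
    cudaMalloc(&a, n * sizeof(int));
    cudaMalloc(&b, n * sizeof(int));
    cudaMalloc(&c, n * sizeof(int));
    cudaMemset(a, 0, n * sizeof(int));
    cudaMemset(b, 0, n * sizeof(int));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int threads = 256;
    int blocks = (n + threads - 1) / threads;   // covers all n even when n % threads != 0
    cudaEventRecord(start);
    add1<<<blocks, threads>>>(n, a, b, c);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}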
3c847ce9babe611ef3188824d011caa1aed8496a.hip
// !!! This is a file automatically generated by hipify!!! #include "ATen/ATen.h" #include "ATen/AccumulateType.h" #include "ATen/hip/HIPContext.h" #include <THH/THHDeviceUtils.cuh> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cuda_bf16.h> #include "type_shim.h" namespace { // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. // template <typename T> // struct SharedMemory // { // // Ensure that we won't compile any un-specialized types // __device__ T *getPointer() // { // extern __device__ void error(void); // error(); // return NULL; // } // }; // https://github.com/NVIDIA/apex/issues/246 template <typename T> struct SharedMemory; template <> struct SharedMemory <float> { __device__ float *getPointer() { extern __shared__ float s_float[]; return s_float; } }; template <> struct SharedMemory <double> { __device__ double *getPointer() { extern __shared__ double s_double[]; return s_double; } }; } template<typename T, typename U> __device__ void cuLoadWriteStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const T* dout, const int i1_end, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar ) { int i1 = i1_block+thr_load_row_off; if (i1 < i1_end) { U curr_mean = mean[i1]; U curr_invvar = invvar[i1]; for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1*n2+i2; int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; if (i2<n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] = curr_dout; warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar; } else { warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } else { for (int k = 0; k < blockDim.y; ++k) { int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } template<typename T, typename U> __device__ void cuLoadAddStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const T* dout, const int i1_end, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar ) { int i1 = i1_block+thr_load_row_off; if (i1 < i1_end) { U curr_mean = mean[i1]; U curr_invvar = invvar[i1]; for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1*n2+i2; int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; if (i2<n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] += curr_dout; warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; } } } } template<typename T, typename U> __global__ void cuComputePartGradGammaBeta( const T* __restrict__ dout, const T* __restrict__ input, const int n1, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar, U epsilon, U* part_grad_gamma, U* part_grad_beta) { const int numsegs_n1 = (n1+blockDim.y*blockDim.y-1) / (blockDim.y*blockDim.y); const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y; const int i1_beg = blockIdx.y * segs_per_block * blockDim.y*blockDim.y; const int i1_beg_plus_one = (blockIdx.y+1) * segs_per_block * blockDim.y*blockDim.y; const int i1_end = i1_beg_plus_one < n1 ? 
i1_beg_plus_one : n1; const int row_stride = blockDim.x+1; const int thr_load_col_off = (threadIdx.x*blockDim.y)&(blockDim.x-1); const int thr_load_row_off = (threadIdx.x*blockDim.y)/blockDim.x + threadIdx.y*blockDim.y; const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off; SharedMemory<U> shared; U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + (blockDim.y - 1)*(blockDim.x/blockDim.y) elements U* warp_buf1 = (U*)buf; U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride; // compute partial sums from strided inputs // do this to increase number of loads in flight cuLoadWriteStridedInputs(i1_beg,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar); for (int i1_block = i1_beg+blockDim.y*blockDim.y; i1_block < i1_end; i1_block+=blockDim.y*blockDim.y) { cuLoadAddStridedInputs(i1_block,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar); } __syncthreads(); // inter-warp reductions // sum within each warp U acc1 = U(0); U acc2 = U(0); for (int k = 0; k < blockDim.y; ++k) { int row1 = threadIdx.y + k*blockDim.y; int idx1 = row1*row_stride + threadIdx.x; acc1 += warp_buf1[idx1]; acc2 += warp_buf2[idx1]; } warp_buf1[threadIdx.y*row_stride+threadIdx.x] = acc1; warp_buf2[threadIdx.y*row_stride+threadIdx.x] = acc2; __syncthreads(); // sum all warps for (int offset = blockDim.y/2; offset > 1; offset /= 2) { if (threadIdx.y < offset) { int row1 = threadIdx.y; int row2 = threadIdx.y + offset; int idx1 = row1*row_stride + threadIdx.x; int idx2 = row2*row_stride + threadIdx.x; warp_buf1[idx1] += warp_buf1[idx2]; warp_buf2[idx1] += warp_buf2[idx2]; } __syncthreads(); } int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.y == 0 && i2 < n2) { int row1 = threadIdx.y; int row2 = threadIdx.y + 1; int idx1 = row1*row_stride + threadIdx.x; int idx2 = row2*row_stride + threadIdx.x; part_grad_beta[blockIdx.y*n2+i2] = warp_buf1[idx1] + warp_buf1[idx2]; part_grad_gamma[blockIdx.y*n2+i2] = warp_buf2[idx1] + warp_buf2[idx2]; } } template<typename T, typename U> __global__ void cuComputeGradGammaBeta( const U* part_grad_gamma, const U* part_grad_beta, const int part_size, const int n1, const int n2, T* grad_gamma, T* grad_beta) { // sum partial gradients for gamma and beta SharedMemory<U> shared; U* buf = shared.getPointer(); int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < n2) { // each warp does sequential reductions until reduced part_size is num_warps int num_warp_reductions = part_size / blockDim.y; U sum_gamma = U(0); U sum_beta = U(0); const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2; for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { sum_gamma += part_grad_gamma_ptr[warp_offset*n2]; sum_beta += part_grad_beta_ptr[warp_offset*n2]; } // inter-warp reductions const int nbsize3 = blockDim.x * blockDim.y / 2; for (int offset = blockDim.y/2; offset >= 1; offset /= 2) { // top half write to shared memory if (threadIdx.y >= offset && threadIdx.y < 2*offset) { const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[write_idx] = sum_gamma; buf[write_idx+nbsize3] = sum_beta; } __syncthreads(); // bottom half sums if (threadIdx.y < offset) { const int read_idx = threadIdx.y * blockDim.x + threadIdx.x; sum_gamma += buf[read_idx]; sum_beta += buf[read_idx+nbsize3]; } 
__syncthreads(); } // write out fully summed gradients if (threadIdx.y == 0) { grad_gamma[i2] = sum_gamma; grad_beta[i2] = sum_beta; } } } template<typename T, typename U> void HostLayerNormGradient( const T* dout, const U* mean, const U* invvar, at::Tensor* input, int n1, int n2, const T* gamma, const T* beta, double epsilon, T* grad_gamma, T* grad_beta ) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); if (gamma != NULL && beta != NULL) { // compute grad_gamma(j) and grad_beta(j) const int part_size = 16; const dim3 threads2(32,4,1); const dim3 blocks2((n2+threads2.x-1)/threads2.x,part_size,1); const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1); const int nshared2_b = threads2.x * threads2.y * sizeof(U); const int nshared2 = nshared2_a > nshared2_b ? nshared2_a : nshared2_b; at::Tensor part_grad_gamma = at::empty({part_size,n2}, input->options().dtype((input->scalar_type()==at::ScalarType::Half || input->scalar_type()==at::ScalarType::BFloat16) ? at::ScalarType::Float : input->scalar_type())); at::Tensor part_grad_beta = at::empty_like(part_grad_gamma); hipLaunchKernelGGL(( cuComputePartGradGammaBeta), dim3(blocks2), dim3(threads2), nshared2, stream, dout, input->DATA_PTR<T>(), n1,n2, mean, invvar, U(epsilon), part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>()); const dim3 threads3(32,8,1); const dim3 blocks3((n2+threads2.x-1)/threads2.x,1,1); const int nshared3 = threads3.x * threads3.y * sizeof(U); hipLaunchKernelGGL(( cuComputeGradGammaBeta), dim3(blocks3), dim3(threads3), nshared3, stream, part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>(), part_size, n1,n2, grad_gamma, grad_beta); } } void cuda_layer_norm_gradient( at::Tensor* dout, at::Tensor* mean, at::Tensor* invvar, at::Tensor* input, int n1, int n2, #ifdef VERSION_GE_1_1 at::IntArrayRef normalized_shape, #else at::IntList normalized_shape, #endif at::Tensor* gamma, at::Tensor* beta, double epsilon, at::Tensor* grad_gamma, at::Tensor* grad_beta) { using namespace at; DISPATCH_DOUBLE_FLOAT_AND_HALF_AND_BF16(input->scalar_type(), 0, "cuComputeGradInput", using accscalar_t = at::acc_type<scalar_t_0, true>; HostLayerNormGradient( dout->DATA_PTR<scalar_t_0>(), mean->DATA_PTR<accscalar_t>(), invvar->DATA_PTR<accscalar_t>(), input, n1,n2, // TMJ pass NULL argument for gamma, beta, grad_gamma and grad_beta // if gamma Tensor is NULL on input. gamma->DATA_PTR<scalar_t_0>(), beta->DATA_PTR<scalar_t_0>(), epsilon, grad_gamma->DATA_PTR<scalar_t_0>(), grad_beta->DATA_PTR<scalar_t_0>()); ) }
3c847ce9babe611ef3188824d011caa1aed8496a.cu
#include "ATen/ATen.h" #include "ATen/AccumulateType.h" #include "ATen/cuda/CUDAContext.h" #include <THC/THCDeviceUtils.cuh> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_bf16.h> #include "type_shim.h" namespace { // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. // template <typename T> // struct SharedMemory // { // // Ensure that we won't compile any un-specialized types // __device__ T *getPointer() // { // extern __device__ void error(void); // error(); // return NULL; // } // }; // https://github.com/NVIDIA/apex/issues/246 template <typename T> struct SharedMemory; template <> struct SharedMemory <float> { __device__ float *getPointer() { extern __shared__ float s_float[]; return s_float; } }; template <> struct SharedMemory <double> { __device__ double *getPointer() { extern __shared__ double s_double[]; return s_double; } }; } template<typename T, typename U> __device__ void cuLoadWriteStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const T* dout, const int i1_end, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar ) { int i1 = i1_block+thr_load_row_off; if (i1 < i1_end) { U curr_mean = mean[i1]; U curr_invvar = invvar[i1]; for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1*n2+i2; int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; if (i2<n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] = curr_dout; warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar; } else { warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } else { for (int k = 0; k < blockDim.y; ++k) { int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } template<typename T, typename U> __device__ void cuLoadAddStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const T* dout, const int i1_end, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar ) { int i1 = i1_block+thr_load_row_off; if (i1 < i1_end) { U curr_mean = mean[i1]; U curr_invvar = invvar[i1]; for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1*n2+i2; int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k; if (i2<n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] += curr_dout; warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; } } } } template<typename T, typename U> __global__ void cuComputePartGradGammaBeta( const T* __restrict__ dout, const T* __restrict__ input, const int n1, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar, U epsilon, U* part_grad_gamma, U* part_grad_beta) { const int numsegs_n1 = (n1+blockDim.y*blockDim.y-1) / (blockDim.y*blockDim.y); const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y; const int i1_beg = blockIdx.y * segs_per_block * blockDim.y*blockDim.y; const int i1_beg_plus_one = (blockIdx.y+1) * segs_per_block * blockDim.y*blockDim.y; const int i1_end = i1_beg_plus_one < n1 ? 
i1_beg_plus_one : n1; const int row_stride = blockDim.x+1; const int thr_load_col_off = (threadIdx.x*blockDim.y)&(blockDim.x-1); const int thr_load_row_off = (threadIdx.x*blockDim.y)/blockDim.x + threadIdx.y*blockDim.y; const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off; SharedMemory<U> shared; U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + (blockDim.y - 1)*(blockDim.x/blockDim.y) elements U* warp_buf1 = (U*)buf; U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride; // compute partial sums from strided inputs // do this to increase number of loads in flight cuLoadWriteStridedInputs(i1_beg,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar); for (int i1_block = i1_beg+blockDim.y*blockDim.y; i1_block < i1_end; i1_block+=blockDim.y*blockDim.y) { cuLoadAddStridedInputs(i1_block,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar); } __syncthreads(); // inter-warp reductions // sum within each warp U acc1 = U(0); U acc2 = U(0); for (int k = 0; k < blockDim.y; ++k) { int row1 = threadIdx.y + k*blockDim.y; int idx1 = row1*row_stride + threadIdx.x; acc1 += warp_buf1[idx1]; acc2 += warp_buf2[idx1]; } warp_buf1[threadIdx.y*row_stride+threadIdx.x] = acc1; warp_buf2[threadIdx.y*row_stride+threadIdx.x] = acc2; __syncthreads(); // sum all warps for (int offset = blockDim.y/2; offset > 1; offset /= 2) { if (threadIdx.y < offset) { int row1 = threadIdx.y; int row2 = threadIdx.y + offset; int idx1 = row1*row_stride + threadIdx.x; int idx2 = row2*row_stride + threadIdx.x; warp_buf1[idx1] += warp_buf1[idx2]; warp_buf2[idx1] += warp_buf2[idx2]; } __syncthreads(); } int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.y == 0 && i2 < n2) { int row1 = threadIdx.y; int row2 = threadIdx.y + 1; int idx1 = row1*row_stride + threadIdx.x; int idx2 = row2*row_stride + threadIdx.x; part_grad_beta[blockIdx.y*n2+i2] = warp_buf1[idx1] + warp_buf1[idx2]; part_grad_gamma[blockIdx.y*n2+i2] = warp_buf2[idx1] + warp_buf2[idx2]; } } template<typename T, typename U> __global__ void cuComputeGradGammaBeta( const U* part_grad_gamma, const U* part_grad_beta, const int part_size, const int n1, const int n2, T* grad_gamma, T* grad_beta) { // sum partial gradients for gamma and beta SharedMemory<U> shared; U* buf = shared.getPointer(); int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < n2) { // each warp does sequential reductions until reduced part_size is num_warps int num_warp_reductions = part_size / blockDim.y; U sum_gamma = U(0); U sum_beta = U(0); const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2; for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { sum_gamma += part_grad_gamma_ptr[warp_offset*n2]; sum_beta += part_grad_beta_ptr[warp_offset*n2]; } // inter-warp reductions const int nbsize3 = blockDim.x * blockDim.y / 2; for (int offset = blockDim.y/2; offset >= 1; offset /= 2) { // top half write to shared memory if (threadIdx.y >= offset && threadIdx.y < 2*offset) { const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[write_idx] = sum_gamma; buf[write_idx+nbsize3] = sum_beta; } __syncthreads(); // bottom half sums if (threadIdx.y < offset) { const int read_idx = threadIdx.y * blockDim.x + threadIdx.x; sum_gamma += buf[read_idx]; sum_beta += buf[read_idx+nbsize3]; } 
__syncthreads(); } // write out fully summed gradients if (threadIdx.y == 0) { grad_gamma[i2] = sum_gamma; grad_beta[i2] = sum_beta; } } } template<typename T, typename U> void HostLayerNormGradient( const T* dout, const U* mean, const U* invvar, at::Tensor* input, int n1, int n2, const T* gamma, const T* beta, double epsilon, T* grad_gamma, T* grad_beta ) { auto stream = at::cuda::getCurrentCUDAStream().stream(); if (gamma != NULL && beta != NULL) { // compute grad_gamma(j) and grad_beta(j) const int part_size = 16; const dim3 threads2(32,4,1); const dim3 blocks2((n2+threads2.x-1)/threads2.x,part_size,1); const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1); const int nshared2_b = threads2.x * threads2.y * sizeof(U); const int nshared2 = nshared2_a > nshared2_b ? nshared2_a : nshared2_b; at::Tensor part_grad_gamma = at::empty({part_size,n2}, input->options().dtype((input->scalar_type()==at::ScalarType::Half || input->scalar_type()==at::ScalarType::BFloat16) ? at::ScalarType::Float : input->scalar_type())); at::Tensor part_grad_beta = at::empty_like(part_grad_gamma); cuComputePartGradGammaBeta<<<blocks2, threads2, nshared2, stream>>>( dout, input->DATA_PTR<T>(), n1,n2, mean, invvar, U(epsilon), part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>()); const dim3 threads3(32,8,1); const dim3 blocks3((n2+threads2.x-1)/threads2.x,1,1); const int nshared3 = threads3.x * threads3.y * sizeof(U); cuComputeGradGammaBeta<<<blocks3, threads3, nshared3, stream>>>( part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>(), part_size, n1,n2, grad_gamma, grad_beta); } } void cuda_layer_norm_gradient( at::Tensor* dout, at::Tensor* mean, at::Tensor* invvar, at::Tensor* input, int n1, int n2, #ifdef VERSION_GE_1_1 at::IntArrayRef normalized_shape, #else at::IntList normalized_shape, #endif at::Tensor* gamma, at::Tensor* beta, double epsilon, at::Tensor* grad_gamma, at::Tensor* grad_beta) { using namespace at; DISPATCH_DOUBLE_FLOAT_AND_HALF_AND_BF16(input->scalar_type(), 0, "cuComputeGradInput", using accscalar_t = at::acc_type<scalar_t_0, true>; HostLayerNormGradient( dout->DATA_PTR<scalar_t_0>(), mean->DATA_PTR<accscalar_t>(), invvar->DATA_PTR<accscalar_t>(), input, n1,n2, // TMJ pass NULL argument for gamma, beta, grad_gamma and grad_beta // if gamma Tensor is NULL on input. gamma->DATA_PTR<scalar_t_0>(), beta->DATA_PTR<scalar_t_0>(), epsilon, grad_gamma->DATA_PTR<scalar_t_0>(), grad_beta->DATA_PTR<scalar_t_0>()); ) }
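// Worked example of the dynamic shared-memory sizing used by
// HostLayerNormGradient in the pair above, assuming the accumulation type U
// is float and the launch constants from the file (threads2 = (32, 4, 1)):
//
//   nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1)
//              = 2 * 4 * 4 * 4 * 33          = 4224 bytes
//   nshared2_b = threads2.x * threads2.y * sizeof(U)
//              = 32 * 4 * 4                  =  512 bytes
//   nshared2   = max(nshared2_a, nshared2_b) = 4224 bytes per block
//
// which covers the two staging buffers warp_buf1/warp_buf2 of
// blockDim.y * blockDim.y * (blockDim.x + 1) elements each that
// cuComputePartGradGammaBeta carves out of the shared allocation.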
61dc3bc2b8f13a7cd0fb79082c54fe386305ace0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity HW5
   Histogramming for Speed

   The goal of this assignment is to compute a histogram as fast as possible.
   We have simplified the problem as much as possible to allow you to focus
   solely on the histogramming algorithm.

   The input values that you need to histogram are already the exact bins
   that need to be updated. This is unlike in HW3 where you needed to compute
   the range of the data and then do:
     bin = (val - valMin) / valRange
   to determine the bin. Here the bin is just:
     bin = val

   so the serial histogram calculation looks like:
     for (i = 0; i < numElems; ++i)
       histo[val[i]]++;

   That's it! Your job is to make it run as fast as possible!

   The values are normally distributed - you may take advantage of this fact
   in your implementation. */

#include "utils.h"

#define MAX_THREADS_PER_BLOCK 1024

__global__
void yourHisto(const unsigned int* const vals,  //INPUT
               unsigned int* const histo,       //OUTPUT
               int numVals)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= 0 && idx < numVals) {
        int bin = vals[idx];
        atomicAdd(histo + bin, 1);
    }
}

void computeHistogram(const unsigned int* const d_vals,  //INPUT
                      unsigned int* const d_histo,       //OUTPUT
                      const unsigned int numBins,
                      const unsigned int numElems)
{
    const dim3 blockSize(MAX_THREADS_PER_BLOCK, 1, 1);
    const dim3 gridSize(numElems / blockSize.x + 1);
    hipLaunchKernelGGL(yourHisto, gridSize, blockSize, 0, 0, d_vals, d_histo, numElems);
}
61dc3bc2b8f13a7cd0fb79082c54fe386305ace0.cu
/* Udacity HW5
   Histogramming for Speed

   The goal of this assignment is to compute a histogram as fast as possible.
   We have simplified the problem as much as possible to allow you to focus
   solely on the histogramming algorithm.

   The input values that you need to histogram are already the exact bins
   that need to be updated. This is unlike in HW3 where you needed to compute
   the range of the data and then do:
     bin = (val - valMin) / valRange
   to determine the bin. Here the bin is just:
     bin = val

   so the serial histogram calculation looks like:
     for (i = 0; i < numElems; ++i)
       histo[val[i]]++;

   That's it! Your job is to make it run as fast as possible!

   The values are normally distributed - you may take advantage of this fact
   in your implementation. */

#include "utils.h"

#define MAX_THREADS_PER_BLOCK 1024

__global__
void yourHisto(const unsigned int* const vals,  //INPUT
               unsigned int* const histo,       //OUTPUT
               int numVals)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= 0 && idx < numVals) {
        int bin = vals[idx];
        atomicAdd(histo + bin, 1);
    }
}

void computeHistogram(const unsigned int* const d_vals,  //INPUT
                      unsigned int* const d_histo,       //OUTPUT
                      const unsigned int numBins,
                      const unsigned int numElems)
{
    const dim3 blockSize(MAX_THREADS_PER_BLOCK, 1, 1);
    const dim3 gridSize(numElems / blockSize.x + 1);
    yourHisto<<<gridSize, blockSize>>>(d_vals, d_histo, numElems);
}
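// The yourHisto kernel above issues one global atomicAdd per input element.
// A common speed-up for this assignment is to privatize the histogram in
// shared memory, one copy per block, and flush it with one global atomic per
// bin. Minimal sketch, assuming numBins * sizeof(unsigned int) fits in the
// dynamic shared-memory allocation; kernel and launch names are illustrative.
__global__ void sharedHisto(const unsigned int* const vals,
                            unsigned int* const histo,
                            int numVals, int numBins)
{
    extern __shared__ unsigned int localHisto[];   // numBins counters per block

    for (int b = threadIdx.x; b < numBins; b += blockDim.x)
        localHisto[b] = 0;
    __syncthreads();

    // Grid-stride loop: contention moves to fast shared-memory atomics.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numVals;
         i += blockDim.x * gridDim.x)
        atomicAdd(&localHisto[vals[i]], 1);
    __syncthreads();

    // One global atomic per (block, bin) instead of one per element.
    for (int b = threadIdx.x; b < numBins; b += blockDim.x)
        atomicAdd(&histo[b], localHisto[b]);
}

// Possible launch from computeHistogram:
//   sharedHisto<<<gridSize, blockSize, numBins * sizeof(unsigned int)>>>(
//       d_vals, d_histo, numElems, numBins);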
37ebbc34cda1f0c324fb0275bbd44279139ae1c8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Linear thread index within a (possibly 3D) thread block.
__device__ unsigned int getTid3d3d(){
    return threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
}

// Linear block index within a (possibly 3D) grid.
__device__ unsigned int getBid3d3d(){
    return blockIdx.x + gridDim.x*(blockIdx.y + gridDim.y * blockIdx.z);
}

// Global thread index across the whole grid.
__device__ unsigned int getGid3d3d(){
    int blockId = blockIdx.x + blockIdx.y * gridDim.x
                  + gridDim.x * gridDim.y * blockIdx.z;
    int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
                   + (threadIdx.y * blockDim.x)
                   + (threadIdx.z * (blockDim.x * blockDim.y))
                   + threadIdx.x;
    return threadId;
}

// Complex multiply and complex-by-scalar multiply.
__device__ double2 mult(double2 a, double2 b){
    return {a.x*b.x - a.y*b.y, a.x*b.y + a.y*b.x};
}

__device__ double2 mult(double2 a, double b){
    return {a.x*b, a.y*b};
}

// Block-wise reduction: each block sums blockDim.x values of `input` in
// dynamically sized shared memory and writes one partial sum to output[bid].
__global__ void multipass(double2* input, double2* output, int pass){
    unsigned int tid = threadIdx.x + threadIdx.y*blockDim.x
                       + threadIdx.z * blockDim.x * blockDim.y;
    unsigned int bid = blockIdx.x + blockIdx.y * gridDim.x
                       + gridDim.x * gridDim.y * blockIdx.z;
    //unsigned int tid = getTid3d3d();
    //unsigned int bid = getBid3d3d();
    // printf("bid0=%d\n",bid);
    unsigned int gid = getGid3d3d();
    extern __shared__ double2 sdata[];
    sdata[tid] = input[gid];
    __syncthreads();
    for(int i = blockDim.x>>1; i > 0; i>>=1){
        if(tid < i){
            sdata[tid].x += sdata[tid + i].x;
            sdata[tid].y += sdata[tid + i].y;
        }
        __syncthreads();
    }
    if(tid==0){
        output[bid] = sdata[0];
    }
}
37ebbc34cda1f0c324fb0275bbd44279139ae1c8.cu
#include "includes.h" __device__ unsigned int getTid3d3d(){ return blockDim.x * ( blockDim.y * ( blockDim.z + ( threadIdx.z * blockDim.y ) ) + threadIdx.y ) + threadIdx.x; } __device__ unsigned int getBid3d3d(){ return blockIdx.x + gridDim.x*(blockIdx.y + gridDim.y * blockIdx.z); } __device__ unsigned int getGid3d3d(){ int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.y * blockDim.x) + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x; return threadId; } __device__ double2 mult(double2 a, double2 b){ return {a.x*b.x - a.y*b.y, a.x*b.y + a.y*b.x}; } __device__ double2 mult(double2 a, double b){ return {a.x*b, a.y*b}; } __global__ void multipass(double2* input, double2* output, int pass){ unsigned int tid = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z * blockDim.x * blockDim.y; unsigned int bid = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //unsigned int tid = getTid3d3d(); //unsigned int bid = getBid3d3d(); // printf("bid0=%d\n",bid); unsigned int gid = getGid3d3d(); extern __shared__ double2 sdata[]; sdata[tid] = input[gid]; __syncthreads(); for(int i = blockDim.x>>1; i > 0; i>>=1){ if(tid < i){ sdata[tid].x += sdata[tid + i].x; sdata[tid].y += sdata[tid + i].y; } __syncthreads(); } if(tid==0){ output[bid] = sdata[0]; } }
e859ff1d81dfd8182563109e622e1e5008fce28a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by kotaro on 2019/12/13. // #include <cstdio> #include <iostream> #include "functions.h" #include "timer.hpp" /* * 1. pour data from Ciphertext * 2. memcpy to GPU * 3. perform evaluation * 4. send back to host * 5. Update Ciphertext instances */ /* * NOTE: * small_ntt_tables contains 'coeff_mod_count' of small_ntt_table */ // --------------------------------------------------------------------------- ElapsedTime rescale_to_next(const CuCiphertext &encrypted, CuCiphertext &destination, const CudaContextData &context, int cuda_device_id) { // http://www.slis.tsukuba.ac.jp/~fujisawa.makoto.fu/cgi-bin/wiki/index.php?CUDA%A4%C7%B9%D4%CE%F3%B1%E9%BB%BB%A1%A7%B2%C3%B8%BA%BB%BB hipSetDevice(cuda_device_id); #ifndef NDEBUG for (size_t i = 0; i < context.next_coeff_modulus.size(); i++) { assert(context.coeff_modulus.at(i) == context.next_coeff_modulus.at(i)); } #endif size_t coeff_count = context.coeff_count; int coeff_count_power = context.coeff_count_power; #ifndef NDEBUG cout << "Coeff Count: " << coeff_count << endl; cout << "Coeff Count Power: " << coeff_count_power << endl; #endif size_t coeff_modulus_size = context.coeff_modulus.size(); size_t coeff_modulus_const_ratio_size = context.coeff_modulus_const_ratio.size(); size_t next_coeff_modulus_size = context.next_coeff_modulus.size(); size_t next_coeff_modulus_const_ratio_size = context.next_coeff_modulus_const_ratio.size(); size_t next_ciphertext_size = coeff_count * ENCRYPTED_SIZE * next_coeff_modulus_size; // TODO: q_l // encrypted size_t inv_last_coeff_mod_array_size = context.inv_last_coeff_mod_array.size(); size_t encrypted_size = encrypted.size(); // TODO: precaliculate destination size from input parameters. size_t destination_size = ENCRYPTED_SIZE * coeff_count * next_coeff_modulus_size; #ifndef NDEBUG print_log("Allocate Device Memeory"); #endif auto device_encrypted = cuda::make_unique<uint64_t[]>(encrypted_size); auto device_destination = cuda::make_unique<uint64_t[]>(destination_size); auto device_coeff_modulus = cuda::make_unique<uint64_t[]>(coeff_modulus_size); auto device_coeff_modulus_const_ratio = cuda::make_unique<uint64_t[]>(coeff_modulus_const_ratio_size); // TODO: this may be unnecessary. 
auto device_next_coeff_modulus = cuda::make_unique<uint64_t[]>(next_coeff_modulus_size); auto device_temp1 = cuda::make_unique<uint64_t[]>(coeff_count); // t auto device_temp2 = cuda::make_unique<uint64_t[]>(next_ciphertext_size); // u auto device_ntt_root_powers = cuda::make_unique<uint64_t[]>(coeff_count * coeff_modulus_size); auto device_ntt_inv_root_powers_div_two = cuda::make_unique<uint64_t[]>(coeff_count * coeff_modulus_size); auto device_ntt_scaled_root_powers = cuda::make_unique<uint64_t[]>(coeff_count * coeff_modulus_size); auto device_ntt_scaled_inv_root_powers_div_two = cuda::make_unique<uint64_t[]>(coeff_count * coeff_modulus_size); auto device_inv_last_coeff_mod_array = cuda::make_unique<uint64_t[]>(inv_last_coeff_mod_array_size); Timer timer; timer.Start(); #ifndef NDEBUG print_log("Copy to Device Memeory"); #endif cuda::CHECK_CUDA_ERROR(::hipMemcpyAsync( device_encrypted.get(), encrypted.data(), sizeof(uint64_t) * encrypted_size, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR(::hipMemcpyAsync( device_coeff_modulus.get(), context.coeff_modulus.data(), sizeof(uint64_t) * coeff_modulus_size, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::hipMemcpyAsync(device_coeff_modulus_const_ratio.get(), context.coeff_modulus_const_ratio.data(), sizeof(uint64_t) * coeff_modulus_const_ratio_size, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR(::hipMemcpyAsync( device_next_coeff_modulus.get(), context.next_coeff_modulus.data(), sizeof(uint64_t) * next_coeff_modulus_size, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR(::hipMemcpyAsync( device_ntt_root_powers.get(), context.ntt_root_powers.data(), sizeof(uint64_t) * coeff_modulus_size * coeff_count, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::hipMemcpyAsync(device_ntt_inv_root_powers_div_two.get(), context.ntt_inv_root_powers_div_two.data(), sizeof(uint64_t) * coeff_modulus_size * coeff_count, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::hipMemcpyAsync(device_ntt_scaled_root_powers.get(), context.ntt_scaled_root_powers.data(), sizeof(uint64_t) * coeff_modulus_size * coeff_count, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::hipMemcpyAsync(device_ntt_scaled_inv_root_powers_div_two.get(), context.ntt_scaled_inv_root_powers_div_two.data(), sizeof(uint64_t) * coeff_modulus_size * coeff_count, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::hipMemcpyAsync(device_inv_last_coeff_mod_array.get(), context.inv_last_coeff_mod_array.data(), sizeof(uint64_t) * inv_last_coeff_mod_array_size, hipMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR(hipDeviceSynchronize()); timer.Stop(); auto data_transmission_time = timer.Duration().count(); /* * If the device memory leaks, expand heap size with this (default 8MB) * This expand heap size up to 1GB * http://dfukunaga.hatenablog.com/entry/2017/10/28/163538 */ // size_t device_heap_size = 1024 * 1024 * 1024; // hipDeviceSetLimit(hipLimitMallocHeapSize, size); #ifndef NDEBUG print_poly(encrypted, coeff_count, 10); #endif size_t num_blocks = (coeff_modulus_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; timer.Start(); hipLaunchKernelGGL(( transform_from_ntt_inplace), dim3(num_blocks), dim3(THREADS_PER_BLOCK), 0, 0, device_encrypted.get(), device_coeff_modulus.get(), coeff_modulus_size, coeff_count, coeff_count_power, device_ntt_inv_root_powers_div_two.get(), device_ntt_scaled_inv_root_powers_div_two.get()); cuda::CHECK_CUDA_ERROR(hipDeviceSynchronize()); timer.Stop(); auto inverse_ntt_time = timer.Duration().count(); auto rescale_whole_time = inverse_ntt_time; timer.Start(); #ifndef NDEBUG 
print_log("Perform mod_switch_scale_to_next"); #endif hipLaunchKernelGGL(( mod_switch_scale_to_next), dim3(num_blocks), dim3(THREADS_PER_BLOCK), 0, 0, device_encrypted.get(), device_destination.get(), device_coeff_modulus.get(), device_coeff_modulus_const_ratio.get(), device_next_coeff_modulus.get(), encrypted_size, destination_size, coeff_modulus_size, next_coeff_modulus_size, coeff_count, coeff_count_power, device_ntt_root_powers.get(), device_ntt_scaled_root_powers.get(), device_ntt_inv_root_powers_div_two.get(), device_ntt_scaled_inv_root_powers_div_two.get(), device_temp1.get(), device_temp2.get(), device_inv_last_coeff_mod_array.get()); cuda::CHECK_CUDA_ERROR(hipDeviceSynchronize()); timer.Stop(); auto rescale_time = timer.Duration().count(); rescale_whole_time += rescale_time; timer.Start(); hipLaunchKernelGGL(( transform_to_ntt_inplace), dim3(num_blocks), dim3(THREADS_PER_BLOCK), 0, 0, device_destination.get(), device_coeff_modulus.get(), next_coeff_modulus_size, coeff_count, coeff_count_power, device_ntt_root_powers.get(), device_ntt_scaled_root_powers.get()); cuda::CHECK_CUDA_ERROR(hipDeviceSynchronize()); timer.Stop(); auto ntt_time = timer.Duration().count(); rescale_whole_time += ntt_time; timer.Start(); #ifndef NDEBUG print_log("Get the result from GPU"); #endif destination.resize(destination_size); cuda::CHECK_CUDA_ERROR(::hipMemcpyAsync( destination.data(), device_destination.get(), sizeof(uint64_t) * destination_size, hipMemcpyDeviceToHost)); cuda::CHECK_CUDA_ERROR(hipDeviceSynchronize()); timer.Stop(); data_transmission_time += timer.Duration().count(); #ifndef NDEBUG print_poly(destination, coeff_count, 10); #endif #ifndef NDEBUG auto rescale_ratio = rescale_time / rescale_whole_time; auto ntt_ratio = ntt_time / rescale_whole_time; auto inverse_ntt_ratio = inverse_ntt_time / rescale_whole_time; cout << "rescale_time(iNTT+Rescale+NTT): " << rescale_whole_time << " [us]" << endl; cout << "\t" << "iNTT: " << inverse_ntt_time << " [us] (" << inverse_ntt_ratio << ")" << endl; cout << "\t" << "Rescale: " << rescale_time << " [us] (" << rescale_ratio << ")" << endl; cout << "\t" << "NTT: " << ntt_time << " [us] (" << ntt_ratio << ")" << endl; cout << "data_transmission_time(sum): " << data_transmission_time << " [us]" << endl; #endif return ElapsedTime{rescale_time, ntt_time, inverse_ntt_time, data_transmission_time}; } __global__ void mod_switch_scale_to_next( uint64_t_array encrypted, uint64_t_array destination, const uint64_t_array coeff_modulus, const uint64_t_array coeff_modulus_const_ratio, // std::array<uint64_t, 3> // SmallModulus::const_ratio_ const uint64_t_array next_coeff_modulus, size_t encrypted_size, size_t destination_size, size_t coeff_modulus_size, size_t next_coeff_modulus_size, size_t coeff_count, int coeff_count_power, uint64_t_array ntt_root_powers, uint64_t_array ntt_scaled_root_powers, uint64_t_array ntt_inv_root_powers_div_two, uint64_t_array ntt_scaled_inv_root_powers_div_two, uint64_t_array temp1, // t uint64_t_array temp2, // u uint64_t_array inv_last_coeff_mod_array // q_l^-1 mod q_i ) { const auto tid = blockIdx.x * blockDim.x + threadIdx.x; // size_t num_blocks = // (coeff_modulus_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; if (tid == 0) { auto last_modulus_index = coeff_modulus_size - 1; auto last_modulus = coeff_modulus[last_modulus_index]; uint64_t half = last_modulus >> 1; #ifndef NDEBUG printf("\tq_l: %llu\n", last_modulus); printf("\tq_l/2: %llu\n", half); #endif auto temp2_ptr = temp2; for (size_t i = 0; i < ENCRYPTED_SIZE; i++) { 
#ifndef NDEBUG printf("evaluating c_%llu...\n", i); #endif const auto c_i = get_poly(encrypted, i, coeff_count, coeff_modulus_size); set_uint_uint(c_i + next_coeff_modulus_size * coeff_count, coeff_count, temp1); for (size_t j = 0; j < coeff_count; j++) { temp1[j] = barret_reduce_63(temp1[j] + half, last_modulus, get_const_ratio(coeff_modulus_const_ratio, last_modulus_index)); } for (size_t mod_index = 0; mod_index < next_coeff_modulus_size; mod_index++, temp2_ptr += coeff_count) { auto const_ratio = get_const_ratio(coeff_modulus_const_ratio, mod_index); #ifndef NDEBUG printf("mod_index: %llu\n", mod_index); printf("\tcoeff_modulus: %llu\n", coeff_modulus[mod_index]); printf("\tconst_ratio: %llu %llu %llu\n", const_ratio[0], const_ratio[1], const_ratio[2]); #endif // (ct mod qk) mod qi modulo_poly_coeffs_63(temp1, coeff_count, coeff_modulus[mod_index], const_ratio, temp2_ptr); uint64_t half_mod = barret_reduce_63( half, coeff_modulus[mod_index], get_const_ratio(coeff_modulus_const_ratio, mod_index)); #ifndef NDEBUG printf("\tq_l/2 mod q_%llu: %llu\n", mod_index, half_mod); #endif for (size_t j = 0; j < coeff_count; j++) { temp2_ptr[j] = sub_uint_uint_mod(temp2_ptr[j], half_mod, coeff_modulus[mod_index]); // printf("%d\n", temp2_ptr[j]); } // ((ct mod qi) - (ct mod qk)) mod qi sub_poly_poly_coeffmod( get_poly(encrypted, i, coeff_count, coeff_modulus_size) + mod_index * coeff_count, temp2_ptr, coeff_count, coeff_modulus[mod_index], temp2_ptr); // // qk^(-1) * ((ct mod qi) - (ct mod qk)) mod qi multiply_poly_scalar_coeffmod( temp2_ptr, coeff_count, inv_last_coeff_mod_array[mod_index], coeff_modulus[mod_index], const_ratio, temp2_ptr); } } set_poly_poly(temp2, coeff_count * ENCRYPTED_SIZE, next_coeff_modulus_size, destination); } } __device__ void multiply_poly_scalar_coeffmod( const uint64_t *poly, size_t coeff_count, uint64_t scalar, const uint64_t modulus, const uint64_t_array const_ratio, uint64_t *result) { // Explicit inline // for (int i = 0; i < coeff_count; i++) //{ // *result++ = multiply_uint_uint_mod(*poly++, scalar, modulus); //} const uint64_t const_ratio_0 = const_ratio[0]; const uint64_t const_ratio_1 = const_ratio[1]; for (; coeff_count--; poly++, result++) { unsigned long long z[2], tmp1, tmp2[2], tmp3, carry; multiply_uint64(*poly, scalar, z); // Reduces z using base 2^64 Barrett reduction // Multiply input and const_ratio // Round 1 multiply_uint64_hw64(z[0], const_ratio_0, &carry); multiply_uint64(z[0], const_ratio_1, tmp2); tmp3 = tmp2[1] + add_uint64(tmp2[0], carry, &tmp1); // Round 2 multiply_uint64(z[1], const_ratio_0, tmp2); carry = tmp2[1] + add_uint64(tmp1, tmp2[0], &tmp1); // This is all we care about tmp1 = z[1] * const_ratio_1 + tmp3 + carry; // Barrett subtraction tmp3 = z[0] - tmp1 * modulus; // Claim: One more subtraction is enough *result = tmp3 - (modulus & static_cast<uint64_t>( -static_cast<int64_t>(tmp3 >= modulus))); } } // TODO: change to default cuda kernel(__global__) // NOTE: It works!!!!!!!!!!!!!!!!Yah!!!!!!!!!!!!!!!!!! 
__global__ void transform_from_ntt_inplace( uint64_t_array encrypted_ntt, // ciphertext uint64_t_array coeff_modulus, size_t coeff_modulus_count, // coeff modulus size_t coeff_count, // poly_modulus_degree int coeff_count_power, // lg(poly_modulus_degree) uint64_t_array ntt_inv_root_powers_div_two, uint64_t_array ntt_scaled_inv_root_powers_div_two) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; // const size_t poly_uint64_count = coeff_count * coeff_modulus_count; const size_t i = tid / coeff_modulus_count; const size_t j = tid - coeff_modulus_count * i; if (tid < ENCRYPTED_SIZE * coeff_modulus_count) { inverse_ntt_negacyclic_harvey( get_poly(encrypted_ntt, i, coeff_count, coeff_modulus_count) + (j * coeff_count), coeff_count_power, coeff_modulus[j], ntt_inv_root_powers_div_two + coeff_count * j, ntt_scaled_inv_root_powers_div_two + coeff_count * j); } } // TODO: change to default cuda kernel(__global__) // it works!!!!!!!!!!!!!!!!!!!!!!yah!!!!!!!!!!!!!!!!! __global__ void transform_to_ntt_inplace( uint64_t_array encrypted, // ciphertext uint64_t_array coeff_modulus, size_t coeff_modulus_count, // coeff modulus size_t coeff_count, // poly_modulus_degree int coeff_count_power, // lg(poly_modulus_degree) uint64_t_array ntt_root_powers, uint64_t_array ntt_scaled_root_powers) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; // const size_t poly_uint64_count = coeff_count * coeff_modulus_count; const size_t i = tid / coeff_modulus_count; const size_t j = tid - coeff_modulus_count * i; // #pragma unroll if (tid < ENCRYPTED_SIZE * coeff_modulus_count) { ntt_negacyclic_harvey( get_poly(encrypted, i, coeff_count, coeff_modulus_count) + (j * coeff_count), coeff_count_power, coeff_modulus[j], ntt_root_powers + coeff_count * j, ntt_scaled_root_powers + coeff_count * j); } } // Inverse negacyclic NTT using Harvey's butterfly. (See Patrick Longa and // Michael Naehrig). __device__ void inverse_ntt_negacyclic_harvey_lazy( uint64_t_array operand, uint64_t modulus, int coeff_count_power, uint64_t_array inv_root_powers_div_two, uint64_t_array scaled_inv_root_powers_div_two) { uint64_t two_times_modulus = modulus * 2; // return the bit-reversed order of NTT. 
size_t n = size_t(1) << coeff_count_power; size_t t = 1; // printf("n = %llu, t = %llu\n", n, t); for (size_t m = n; m > 1; m >>= 1) { size_t j1 = 0; size_t h = m >> 1; // printf("m = %llu, t = %llu, h = %llu\n", m, t, h); if (t >= 4) { for (size_t i = 0; i < h; i++) { size_t j2 = j1 + t; // Need the powers of phi^{-1} in bit-reversed order const uint64_t W = inv_root_powers_div_two[h + i]; const uint64_t Wprime = scaled_inv_root_powers_div_two[h + i]; // printf("\tW = %llu, Wprime = %llu\n", W, Wprime); uint64_t *U = operand + j1; uint64_t *V = U + t; uint64_t currU; uint64_t T; unsigned long long H; for (size_t j = j1; j < j2; j += 4) { T = two_times_modulus - *V + *U; currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); *V++ = T * W - H * modulus; T = two_times_modulus - *V + *U; currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); *V++ = T * W - H * modulus; T = two_times_modulus - *V + *U; currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); *V++ = T * W - H * modulus; T = two_times_modulus - *V + *U; currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); *V++ = T * W - H * modulus; } j1 += (t << 1); } } else { for (size_t i = 0; i < h; i++) { size_t j2 = j1 + t; // Need the powers of phi^{-1} in bit-reversed order const uint64_t W = inv_root_powers_div_two[h + i]; const uint64_t Wprime = scaled_inv_root_powers_div_two[h + i]; uint64_t *U = operand + j1; uint64_t *V = U + t; uint64_t currU; uint64_t T; unsigned long long H; for (size_t j = j1; j < j2; j++) { // U = x[i], V = x[i+m] // Compute U - V + 2q T = two_times_modulus - *V + *U; // Cleverly check whether currU + currV >= // two_times_modulus currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); // Need to make it so that div2_uint_mod takes values // that are > q. div2_uint_mod(U, modulusptr, // coeff_uint64_count, U); We use also the fact that // parity of currU is same as parity of T. Since our // modulus is always so small that currU + // masked_modulus < 2^64, we never need to worry about // wrapping around when adding masked_modulus. uint64_t // masked_modulus = modulus & // static_cast<uint64_t>(-static_cast<int64_t>(T & 1)); // uint64_t carry = add_uint64(currU, masked_modulus, 0, // &currU); currU += modulus & // static_cast<uint64_t>(-static_cast<int64_t>(T & 1)); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); // effectively, the next two multiply perform multiply // modulo beta = 2**wordsize. 
*V++ = W * T - H * modulus; } j1 += (t << 1); } } t <<= 1; } } __device__ void ntt_negacyclic_harvey_lazy(uint64_t_array operand, uint64_t modulus, int coeff_count_power, uint64_t_array root_powers, uint64_t_array scaled_root_powers) { auto two_times_modulus = modulus * 2; // Return the NTT in scrambled order size_t n = size_t(1) << coeff_count_power; size_t t = n >> 1; for (size_t m = 1; m < n; m <<= 1) { if (t >= 4) { for (size_t i = 0; i < m; i++) { size_t j1 = 2 * i * t; size_t j2 = j1 + t; const uint64_t W = root_powers[m + i]; const uint64_t Wprime = scaled_root_powers[m + i]; uint64_t *X = operand + j1; uint64_t *Y = X + t; uint64_t currX; unsigned long long Q; for (size_t j = j1; j < j2; j += 4) { currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = *Y * W - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = *Y * W - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = *Y * W - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = *Y * W - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); } } } else { for (size_t i = 0; i < m; i++) { size_t j1 = 2 * i * t; size_t j2 = j1 + t; const uint64_t W = root_powers[m + i]; const uint64_t Wprime = scaled_root_powers[m + i]; uint64_t *X = operand + j1; uint64_t *Y = X + t; uint64_t currX; unsigned long long Q; for (size_t j = j1; j < j2; j++) { // The Harvey butterfly: assume X, Y in [0, 2p), and // return X', Y' in [0, 2p). X', Y' = X + WY, X - WY // (mod p). currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = W * *Y - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); } } } t >>= 1; } } __global__ void kernel(void) { printf("Hello from CUDA kernel.\n"); } void proxy(void) { hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, ); }
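// Worked form of the per-coefficient update carried out by
// mod_switch_scale_to_next in the file above, as read from its kernel body
// (c_k is the ciphertext component modulo the dropped prime q_k,
// half = q_k >> 1, and q_k^{-1} mod q_i comes from inv_last_coeff_mod_array):
//
//   t    = (c_k + half) mod q_k                      // temp1: add half for rounding
//   u_i  = ((t mod q_i) - (half mod q_i)) mod q_i    // temp2: re-center under q_i
//   c'_i = q_k^{-1} * (c_i - u_i) mod q_i            // scaled-down RNS component
//
// so each surviving RNS component ends up holding approximately
// round(c / q_k) mod q_i, i.e. the ciphertext is divided by the last prime in
// the modulus chain, which is the rescaling step that rescale_to_next exposes
// to the caller.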
e859ff1d81dfd8182563109e622e1e5008fce28a.cu
// // Created by kotaro on 2019/12/13. // #include <cstdio> #include <iostream> #include "functions.h" #include "timer.hpp" /* * 1. pour data from Ciphertext * 2. memcpy to GPU * 3. perform evaluation * 4. send back to host * 5. Update Ciphertext instances */ /* * NOTE: * small_ntt_tables contains 'coeff_mod_count' of small_ntt_table */ // --------------------------------------------------------------------------- ElapsedTime rescale_to_next(const CuCiphertext &encrypted, CuCiphertext &destination, const CudaContextData &context, int cuda_device_id) { // http://www.slis.tsukuba.ac.jp/~fujisawa.makoto.fu/cgi-bin/wiki/index.php?CUDA%A4%C7%B9%D4%CE%F3%B1%E9%BB%BB%A1%A7%B2%C3%B8%BA%BB%BB cudaSetDevice(cuda_device_id); #ifndef NDEBUG for (size_t i = 0; i < context.next_coeff_modulus.size(); i++) { assert(context.coeff_modulus.at(i) == context.next_coeff_modulus.at(i)); } #endif size_t coeff_count = context.coeff_count; int coeff_count_power = context.coeff_count_power; #ifndef NDEBUG cout << "Coeff Count: " << coeff_count << endl; cout << "Coeff Count Power: " << coeff_count_power << endl; #endif size_t coeff_modulus_size = context.coeff_modulus.size(); size_t coeff_modulus_const_ratio_size = context.coeff_modulus_const_ratio.size(); size_t next_coeff_modulus_size = context.next_coeff_modulus.size(); size_t next_coeff_modulus_const_ratio_size = context.next_coeff_modulus_const_ratio.size(); size_t next_ciphertext_size = coeff_count * ENCRYPTED_SIZE * next_coeff_modulus_size; // TODO: 各q_lごとに存在する気がするので修正する // ひとまずは動かすこと優先なのでencryptedが対応するレベルのものだけ取り込む. size_t inv_last_coeff_mod_array_size = context.inv_last_coeff_mod_array.size(); size_t encrypted_size = encrypted.size(); // TODO: precaliculate destination size from input parameters. size_t destination_size = ENCRYPTED_SIZE * coeff_count * next_coeff_modulus_size; #ifndef NDEBUG print_log("Allocate Device Memeory"); #endif auto device_encrypted = cuda::make_unique<uint64_t[]>(encrypted_size); auto device_destination = cuda::make_unique<uint64_t[]>(destination_size); auto device_coeff_modulus = cuda::make_unique<uint64_t[]>(coeff_modulus_size); auto device_coeff_modulus_const_ratio = cuda::make_unique<uint64_t[]>(coeff_modulus_const_ratio_size); // TODO: this may be unnecessary. 
auto device_next_coeff_modulus = cuda::make_unique<uint64_t[]>(next_coeff_modulus_size); auto device_temp1 = cuda::make_unique<uint64_t[]>(coeff_count); // t auto device_temp2 = cuda::make_unique<uint64_t[]>(next_ciphertext_size); // u auto device_ntt_root_powers = cuda::make_unique<uint64_t[]>(coeff_count * coeff_modulus_size); auto device_ntt_inv_root_powers_div_two = cuda::make_unique<uint64_t[]>(coeff_count * coeff_modulus_size); auto device_ntt_scaled_root_powers = cuda::make_unique<uint64_t[]>(coeff_count * coeff_modulus_size); auto device_ntt_scaled_inv_root_powers_div_two = cuda::make_unique<uint64_t[]>(coeff_count * coeff_modulus_size); auto device_inv_last_coeff_mod_array = cuda::make_unique<uint64_t[]>(inv_last_coeff_mod_array_size); Timer timer; timer.Start(); #ifndef NDEBUG print_log("Copy to Device Memeory"); #endif cuda::CHECK_CUDA_ERROR(::cudaMemcpyAsync( device_encrypted.get(), encrypted.data(), sizeof(uint64_t) * encrypted_size, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR(::cudaMemcpyAsync( device_coeff_modulus.get(), context.coeff_modulus.data(), sizeof(uint64_t) * coeff_modulus_size, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::cudaMemcpyAsync(device_coeff_modulus_const_ratio.get(), context.coeff_modulus_const_ratio.data(), sizeof(uint64_t) * coeff_modulus_const_ratio_size, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR(::cudaMemcpyAsync( device_next_coeff_modulus.get(), context.next_coeff_modulus.data(), sizeof(uint64_t) * next_coeff_modulus_size, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR(::cudaMemcpyAsync( device_ntt_root_powers.get(), context.ntt_root_powers.data(), sizeof(uint64_t) * coeff_modulus_size * coeff_count, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::cudaMemcpyAsync(device_ntt_inv_root_powers_div_two.get(), context.ntt_inv_root_powers_div_two.data(), sizeof(uint64_t) * coeff_modulus_size * coeff_count, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::cudaMemcpyAsync(device_ntt_scaled_root_powers.get(), context.ntt_scaled_root_powers.data(), sizeof(uint64_t) * coeff_modulus_size * coeff_count, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::cudaMemcpyAsync(device_ntt_scaled_inv_root_powers_div_two.get(), context.ntt_scaled_inv_root_powers_div_two.data(), sizeof(uint64_t) * coeff_modulus_size * coeff_count, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR( ::cudaMemcpyAsync(device_inv_last_coeff_mod_array.get(), context.inv_last_coeff_mod_array.data(), sizeof(uint64_t) * inv_last_coeff_mod_array_size, cudaMemcpyHostToDevice)); cuda::CHECK_CUDA_ERROR(cudaDeviceSynchronize()); timer.Stop(); auto data_transmission_time = timer.Duration().count(); /* * If the device memory leaks, expand heap size with this (default 8MB) * This expand heap size up to 1GB * http://dfukunaga.hatenablog.com/entry/2017/10/28/163538 */ // size_t device_heap_size = 1024 * 1024 * 1024; // cudaDeviceSetLimit(cudaLimitMallocHeapSize, size); #ifndef NDEBUG print_poly(encrypted, coeff_count, 10); #endif size_t num_blocks = (coeff_modulus_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; timer.Start(); transform_from_ntt_inplace<<<num_blocks, THREADS_PER_BLOCK>>>( device_encrypted.get(), device_coeff_modulus.get(), coeff_modulus_size, coeff_count, coeff_count_power, device_ntt_inv_root_powers_div_two.get(), device_ntt_scaled_inv_root_powers_div_two.get()); cuda::CHECK_CUDA_ERROR(cudaDeviceSynchronize()); timer.Stop(); auto inverse_ntt_time = timer.Duration().count(); auto rescale_whole_time = inverse_ntt_time; timer.Start(); #ifndef NDEBUG 
print_log("Perform mod_switch_scale_to_next"); #endif mod_switch_scale_to_next<<<num_blocks, THREADS_PER_BLOCK>>>( device_encrypted.get(), device_destination.get(), device_coeff_modulus.get(), device_coeff_modulus_const_ratio.get(), device_next_coeff_modulus.get(), encrypted_size, destination_size, coeff_modulus_size, next_coeff_modulus_size, coeff_count, coeff_count_power, device_ntt_root_powers.get(), device_ntt_scaled_root_powers.get(), device_ntt_inv_root_powers_div_two.get(), device_ntt_scaled_inv_root_powers_div_two.get(), device_temp1.get(), device_temp2.get(), device_inv_last_coeff_mod_array.get()); cuda::CHECK_CUDA_ERROR(cudaDeviceSynchronize()); timer.Stop(); auto rescale_time = timer.Duration().count(); rescale_whole_time += rescale_time; timer.Start(); transform_to_ntt_inplace<<<num_blocks, THREADS_PER_BLOCK>>>( device_destination.get(), device_coeff_modulus.get(), next_coeff_modulus_size, coeff_count, coeff_count_power, device_ntt_root_powers.get(), device_ntt_scaled_root_powers.get()); cuda::CHECK_CUDA_ERROR(cudaDeviceSynchronize()); timer.Stop(); auto ntt_time = timer.Duration().count(); rescale_whole_time += ntt_time; timer.Start(); #ifndef NDEBUG print_log("Get the result from GPU"); #endif destination.resize(destination_size); cuda::CHECK_CUDA_ERROR(::cudaMemcpyAsync( destination.data(), device_destination.get(), sizeof(uint64_t) * destination_size, cudaMemcpyDeviceToHost)); cuda::CHECK_CUDA_ERROR(cudaDeviceSynchronize()); timer.Stop(); data_transmission_time += timer.Duration().count(); #ifndef NDEBUG print_poly(destination, coeff_count, 10); #endif #ifndef NDEBUG auto rescale_ratio = rescale_time / rescale_whole_time; auto ntt_ratio = ntt_time / rescale_whole_time; auto inverse_ntt_ratio = inverse_ntt_time / rescale_whole_time; cout << "rescale_time(iNTT+Rescale+NTT): " << rescale_whole_time << " [us]" << endl; cout << "\t" << "iNTT: " << inverse_ntt_time << " [us] (" << inverse_ntt_ratio << ")" << endl; cout << "\t" << "Rescale: " << rescale_time << " [us] (" << rescale_ratio << ")" << endl; cout << "\t" << "NTT: " << ntt_time << " [us] (" << ntt_ratio << ")" << endl; cout << "data_transmission_time(sum): " << data_transmission_time << " [us]" << endl; #endif return ElapsedTime{rescale_time, ntt_time, inverse_ntt_time, data_transmission_time}; } __global__ void mod_switch_scale_to_next( uint64_t_array encrypted, uint64_t_array destination, const uint64_t_array coeff_modulus, const uint64_t_array coeff_modulus_const_ratio, // std::array<uint64_t, 3> // SmallModulus::const_ratio_ const uint64_t_array next_coeff_modulus, size_t encrypted_size, size_t destination_size, size_t coeff_modulus_size, size_t next_coeff_modulus_size, size_t coeff_count, int coeff_count_power, uint64_t_array ntt_root_powers, uint64_t_array ntt_scaled_root_powers, uint64_t_array ntt_inv_root_powers_div_two, uint64_t_array ntt_scaled_inv_root_powers_div_two, uint64_t_array temp1, // t uint64_t_array temp2, // u uint64_t_array inv_last_coeff_mod_array // q_l^-1 mod q_i ) { const auto tid = blockIdx.x * blockDim.x + threadIdx.x; // size_t num_blocks = // (coeff_modulus_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; if (tid == 0) { auto last_modulus_index = coeff_modulus_size - 1; auto last_modulus = coeff_modulus[last_modulus_index]; uint64_t half = last_modulus >> 1; #ifndef NDEBUG printf("\tq_l: %llu\n", last_modulus); printf("\tq_l/2: %llu\n", half); #endif auto temp2_ptr = temp2; for (size_t i = 0; i < ENCRYPTED_SIZE; i++) { #ifndef NDEBUG printf("evaluating c_%llu...\n", i); #endif const auto 
c_i = get_poly(encrypted, i, coeff_count, coeff_modulus_size); set_uint_uint(c_i + next_coeff_modulus_size * coeff_count, coeff_count, temp1); for (size_t j = 0; j < coeff_count; j++) { temp1[j] = barret_reduce_63(temp1[j] + half, last_modulus, get_const_ratio(coeff_modulus_const_ratio, last_modulus_index)); } for (size_t mod_index = 0; mod_index < next_coeff_modulus_size; mod_index++, temp2_ptr += coeff_count) { auto const_ratio = get_const_ratio(coeff_modulus_const_ratio, mod_index); #ifndef NDEBUG printf("mod_index: %llu\n", mod_index); printf("\tcoeff_modulus: %llu\n", coeff_modulus[mod_index]); printf("\tconst_ratio: %llu %llu %llu\n", const_ratio[0], const_ratio[1], const_ratio[2]); #endif // (ct mod qk) mod qi modulo_poly_coeffs_63(temp1, coeff_count, coeff_modulus[mod_index], const_ratio, temp2_ptr); uint64_t half_mod = barret_reduce_63( half, coeff_modulus[mod_index], get_const_ratio(coeff_modulus_const_ratio, mod_index)); #ifndef NDEBUG printf("\tq_l/2 mod q_%llu: %llu\n", mod_index, half_mod); #endif for (size_t j = 0; j < coeff_count; j++) { temp2_ptr[j] = sub_uint_uint_mod(temp2_ptr[j], half_mod, coeff_modulus[mod_index]); // printf("%d\n", temp2_ptr[j]); } // ((ct mod qi) - (ct mod qk)) mod qi sub_poly_poly_coeffmod( get_poly(encrypted, i, coeff_count, coeff_modulus_size) + mod_index * coeff_count, temp2_ptr, coeff_count, coeff_modulus[mod_index], temp2_ptr); // // qk^(-1) * ((ct mod qi) - (ct mod qk)) mod qi multiply_poly_scalar_coeffmod( temp2_ptr, coeff_count, inv_last_coeff_mod_array[mod_index], coeff_modulus[mod_index], const_ratio, temp2_ptr); } } set_poly_poly(temp2, coeff_count * ENCRYPTED_SIZE, next_coeff_modulus_size, destination); } } __device__ void multiply_poly_scalar_coeffmod( const uint64_t *poly, size_t coeff_count, uint64_t scalar, const uint64_t modulus, const uint64_t_array const_ratio, uint64_t *result) { // Explicit inline // for (int i = 0; i < coeff_count; i++) //{ // *result++ = multiply_uint_uint_mod(*poly++, scalar, modulus); //} const uint64_t const_ratio_0 = const_ratio[0]; const uint64_t const_ratio_1 = const_ratio[1]; for (; coeff_count--; poly++, result++) { unsigned long long z[2], tmp1, tmp2[2], tmp3, carry; multiply_uint64(*poly, scalar, z); // Reduces z using base 2^64 Barrett reduction // Multiply input and const_ratio // Round 1 multiply_uint64_hw64(z[0], const_ratio_0, &carry); multiply_uint64(z[0], const_ratio_1, tmp2); tmp3 = tmp2[1] + add_uint64(tmp2[0], carry, &tmp1); // Round 2 multiply_uint64(z[1], const_ratio_0, tmp2); carry = tmp2[1] + add_uint64(tmp1, tmp2[0], &tmp1); // This is all we care about tmp1 = z[1] * const_ratio_1 + tmp3 + carry; // Barrett subtraction tmp3 = z[0] - tmp1 * modulus; // Claim: One more subtraction is enough *result = tmp3 - (modulus & static_cast<uint64_t>( -static_cast<int64_t>(tmp3 >= modulus))); } } // TODO: change to default cuda kernel(__global__) // NOTE: It works!!!!!!!!!!!!!!!!Yah!!!!!!!!!!!!!!!!!! 
__global__ void transform_from_ntt_inplace( uint64_t_array encrypted_ntt, // ciphertext uint64_t_array coeff_modulus, size_t coeff_modulus_count, // coeff modulus size_t coeff_count, // poly_modulus_degree int coeff_count_power, // lg(poly_modulus_degree) uint64_t_array ntt_inv_root_powers_div_two, uint64_t_array ntt_scaled_inv_root_powers_div_two) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; // const size_t poly_uint64_count = coeff_count * coeff_modulus_count; const size_t i = tid / coeff_modulus_count; const size_t j = tid - coeff_modulus_count * i; if (tid < ENCRYPTED_SIZE * coeff_modulus_count) { inverse_ntt_negacyclic_harvey( get_poly(encrypted_ntt, i, coeff_count, coeff_modulus_count) + (j * coeff_count), coeff_count_power, coeff_modulus[j], ntt_inv_root_powers_div_two + coeff_count * j, ntt_scaled_inv_root_powers_div_two + coeff_count * j); } } // TODO: change to default cuda kernel(__global__) // it works!!!!!!!!!!!!!!!!!!!!!!yah!!!!!!!!!!!!!!!!! __global__ void transform_to_ntt_inplace( uint64_t_array encrypted, // ciphertext uint64_t_array coeff_modulus, size_t coeff_modulus_count, // coeff modulus size_t coeff_count, // poly_modulus_degree int coeff_count_power, // lg(poly_modulus_degree) uint64_t_array ntt_root_powers, uint64_t_array ntt_scaled_root_powers) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; // const size_t poly_uint64_count = coeff_count * coeff_modulus_count; const size_t i = tid / coeff_modulus_count; const size_t j = tid - coeff_modulus_count * i; // #pragma unroll if (tid < ENCRYPTED_SIZE * coeff_modulus_count) { ntt_negacyclic_harvey( get_poly(encrypted, i, coeff_count, coeff_modulus_count) + (j * coeff_count), coeff_count_power, coeff_modulus[j], ntt_root_powers + coeff_count * j, ntt_scaled_root_powers + coeff_count * j); } } // Inverse negacyclic NTT using Harvey's butterfly. (See Patrick Longa and // Michael Naehrig). __device__ void inverse_ntt_negacyclic_harvey_lazy( uint64_t_array operand, uint64_t modulus, int coeff_count_power, uint64_t_array inv_root_powers_div_two, uint64_t_array scaled_inv_root_powers_div_two) { uint64_t two_times_modulus = modulus * 2; // return the bit-reversed order of NTT. 
size_t n = size_t(1) << coeff_count_power; size_t t = 1; // printf("n = %llu, t = %llu\n", n, t); for (size_t m = n; m > 1; m >>= 1) { size_t j1 = 0; size_t h = m >> 1; // printf("m = %llu, t = %llu, h = %llu\n", m, t, h); if (t >= 4) { for (size_t i = 0; i < h; i++) { size_t j2 = j1 + t; // Need the powers of phi^{-1} in bit-reversed order const uint64_t W = inv_root_powers_div_two[h + i]; const uint64_t Wprime = scaled_inv_root_powers_div_two[h + i]; // printf("\tW = %llu, Wprime = %llu\n", W, Wprime); uint64_t *U = operand + j1; uint64_t *V = U + t; uint64_t currU; uint64_t T; unsigned long long H; for (size_t j = j1; j < j2; j += 4) { T = two_times_modulus - *V + *U; currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); *V++ = T * W - H * modulus; T = two_times_modulus - *V + *U; currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); *V++ = T * W - H * modulus; T = two_times_modulus - *V + *U; currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); *V++ = T * W - H * modulus; T = two_times_modulus - *V + *U; currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); *V++ = T * W - H * modulus; } j1 += (t << 1); } } else { for (size_t i = 0; i < h; i++) { size_t j2 = j1 + t; // Need the powers of phi^{-1} in bit-reversed order const uint64_t W = inv_root_powers_div_two[h + i]; const uint64_t Wprime = scaled_inv_root_powers_div_two[h + i]; uint64_t *U = operand + j1; uint64_t *V = U + t; uint64_t currU; uint64_t T; unsigned long long H; for (size_t j = j1; j < j2; j++) { // U = x[i], V = x[i+m] // Compute U - V + 2q T = two_times_modulus - *V + *U; // Cleverly check whether currU + currV >= // two_times_modulus currU = *U + *V - (two_times_modulus & static_cast<uint64_t>( -static_cast<int64_t>((*U << 1) >= T))); // Need to make it so that div2_uint_mod takes values // that are > q. div2_uint_mod(U, modulusptr, // coeff_uint64_count, U); We use also the fact that // parity of currU is same as parity of T. Since our // modulus is always so small that currU + // masked_modulus < 2^64, we never need to worry about // wrapping around when adding masked_modulus. uint64_t // masked_modulus = modulus & // static_cast<uint64_t>(-static_cast<int64_t>(T & 1)); // uint64_t carry = add_uint64(currU, masked_modulus, 0, // &currU); currU += modulus & // static_cast<uint64_t>(-static_cast<int64_t>(T & 1)); *U++ = (currU + (modulus & static_cast<uint64_t>( -static_cast<int64_t>(T & 1)))) >> 1; multiply_uint64_hw64(Wprime, T, &H); // effectively, the next two multiply perform multiply // modulo beta = 2**wordsize. 
*V++ = W * T - H * modulus; } j1 += (t << 1); } } t <<= 1; } } __device__ void ntt_negacyclic_harvey_lazy(uint64_t_array operand, uint64_t modulus, int coeff_count_power, uint64_t_array root_powers, uint64_t_array scaled_root_powers) { auto two_times_modulus = modulus * 2; // Return the NTT in scrambled order size_t n = size_t(1) << coeff_count_power; size_t t = n >> 1; for (size_t m = 1; m < n; m <<= 1) { if (t >= 4) { for (size_t i = 0; i < m; i++) { size_t j1 = 2 * i * t; size_t j2 = j1 + t; const uint64_t W = root_powers[m + i]; const uint64_t Wprime = scaled_root_powers[m + i]; uint64_t *X = operand + j1; uint64_t *Y = X + t; uint64_t currX; unsigned long long Q; for (size_t j = j1; j < j2; j += 4) { currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = *Y * W - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = *Y * W - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = *Y * W - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = *Y * W - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); } } } else { for (size_t i = 0; i < m; i++) { size_t j1 = 2 * i * t; size_t j2 = j1 + t; const uint64_t W = root_powers[m + i]; const uint64_t Wprime = scaled_root_powers[m + i]; uint64_t *X = operand + j1; uint64_t *Y = X + t; uint64_t currX; unsigned long long Q; for (size_t j = j1; j < j2; j++) { // The Harvey butterfly: assume X, Y in [0, 2p), and // return X', Y' in [0, 2p). X', Y' = X + WY, X - WY // (mod p). currX = *X - (two_times_modulus & static_cast<uint64_t>(-static_cast<int64_t>( *X >= two_times_modulus))); multiply_uint64_hw64(Wprime, *Y, &Q); Q = W * *Y - Q * modulus; *X++ = currX + Q; *Y++ = currX + (two_times_modulus - Q); } } } t >>= 1; } } __global__ void kernel(void) { printf("Hello from CUDA kernel.\n"); } void proxy(void) { kernel<<<1, 1>>>(); }
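The Barrett-reduction path in multiply_poly_scalar_coeffmod above is easy to get subtly wrong when porting. A minimal host-side oracle for it, assuming a compiler with __uint128_t support (GCC/Clang), might look like the following sketch; it is not part of the source above and only provides the exact value of a*b mod q for comparison against the kernel's output.

#include <cstdint>

// Host-side reference value for a single Barrett-reduced product. Use it to
// spot-check coefficients written back by multiply_poly_scalar_coeffmod:
//   result[j] == mulmod_reference(poly[j], scalar, modulus)
// mulmod_reference is a hypothetical helper, not a function from the source
// above; it assumes __uint128_t is available.
static inline uint64_t mulmod_reference(uint64_t a, uint64_t b, uint64_t modulus)
{
    return static_cast<uint64_t>((static_cast<__uint128_t>(a) * b) % modulus);
}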
82623ce2621f342b7a30647feb4995f13e7fa65b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights // reserved. See files LICENSE and NOTICE for details. // // This file is part of CEED, a collection of benchmarks, miniapps, software // libraries and APIs for efficient high-order finite element and spectral // element discretizations for exascale applications. For more information and // source code availability see http://github.com/ceed. // // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, // a collaborative effort of two U.S. Department of Energy organizations (Office // of Science and the National Nuclear Security Administration) responsible for // the planning and preparation of a capable exascale ecosystem, including // software, applications, hardware, advanced system engineering and early // testbed platforms, in support of the nation's exascale computing imperative. #include "../raja.hpp" // ***************************************************************************** static __global__ void d_vector_op_eq0(const int N, const double c0, double* __restrict v0){ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) v0[i] = c0; } // ***************************************************************************** extern "C" __global__ void d_vector_op_eq(const int N, const double c0, double* __restrict v0){ const size_t blockSize = 128; const size_t gridSize = (N+blockSize-1)/blockSize; #if defined(RAJA_ENABLE_CUDA) hipLaunchKernelGGL(( d_vector_op_eq0), dim3(gridSize),dim3(blockSize), 0, 0, N,c0,v0); #elif defined(RAJA_ENABLE_HIP) hipLaunchKernelGGL((d_vector_op_eq0),dim3(gridSize),dim3(blockSize), 0, 0, N,c0,v0); #endif }
82623ce2621f342b7a30647feb4995f13e7fa65b.cu
// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights // reserved. See files LICENSE and NOTICE for details. // // This file is part of CEED, a collection of benchmarks, miniapps, software // libraries and APIs for efficient high-order finite element and spectral // element discretizations for exascale applications. For more information and // source code availability see http://github.com/ceed. // // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, // a collaborative effort of two U.S. Department of Energy organizations (Office // of Science and the National Nuclear Security Administration) responsible for // the planning and preparation of a capable exascale ecosystem, including // software, applications, hardware, advanced system engineering and early // testbed platforms, in support of the nation's exascale computing imperative. #include "../raja.hpp" // ***************************************************************************** static __global__ void d_vector_op_eq0(const int N, const double c0, double* __restrict v0){ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) v0[i] = c0; } // ***************************************************************************** extern "C" __global__ void d_vector_op_eq(const int N, const double c0, double* __restrict v0){ const size_t blockSize = 128; const size_t gridSize = (N+blockSize-1)/blockSize; #if defined(RAJA_ENABLE_CUDA) d_vector_op_eq0<<<gridSize,blockSize>>>(N,c0,v0); #elif defined(RAJA_ENABLE_HIP) hipLaunchKernelGGL((d_vector_op_eq0),dim3(gridSize),dim3(blockSize), 0, 0, N,c0,v0); #endif }
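The fill kernel above is launched with a grid sized to cover N exactly; a grid-stride formulation is a common alternative when the launch configuration is fixed ahead of time. The sketch below is illustrative only and is not part of the CEED/RAJA source; the name d_vector_op_eq_strided is invented for this example.

// Grid-stride variant of the constant-fill kernel: any grid size covers any N.
static __global__ void d_vector_op_eq_strided(const int N, const double c0,
                                              double* __restrict v0)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N;
         i += blockDim.x * gridDim.x) {
        v0[i] = c0;
    }
}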
8322ff50577a92f2fb3382f4dabeefb8fda6c950.hip
// !!! This is a file automatically generated by hipify!!! // System includes #include <stdio.h> #include <stdlib.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> #include <device_launch_parameters.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B */ #define BLOCK_SIZE 16 #define N 2048 __global__ void matrixMulCUDA(float *C, float *A, float *B, int n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float C_val = 0; for (int k = 0; k < n; ++k) { float A_elem = A[row * n + k]; float B_elem = B[k * n + col]; C_val += A_elem * B_elem; } C[row * n + col] = C_val; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int n) { // Allocate host memory for matrices A and B unsigned int size_A = n * n; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = n * n; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C unsigned int mem_size_C = n * n * sizeof(float); float *h_C = (float *)malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; error = hipMalloc((void **)&d_A, mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **)&d_B, mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **)&d_C, mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 grid((n-1) / BLOCK_SIZE + 1, (n-1) / BLOCK_SIZE + 1, 1); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel hipLaunchKernelGGL(( 
matrixMulCUDA), dim3(grid), dim3(threads), 0, 0, d_C, d_A, d_B, n);

    error = hipGetLastError();

    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to launch kernel (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Record the stop event
    error = hipEventRecord(stop, NULL);

    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Wait for the stop event to complete
    error = hipEventSynchronize(stop);

    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    float msecTotal = 0.0f;
    error = hipEventElapsedTime(&msecTotal, start, stop);
    printf("Elapsed time in msec = %f\n", msecTotal);

    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Copy result from device to host
    error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);

    if (error != hipSuccess)
    {
        printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }

    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    return EXIT_SUCCESS;
}

/**
 * Program main
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");

    // By default, we use device 0
    int devID = 0;
    hipSetDevice(devID);

    hipError_t error;
    hipDeviceProp_t deviceProp;
    error = hipGetDevice(&devID);

    if (error != hipSuccess)
    {
        printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
    }

    error = hipGetDeviceProperties(&deviceProp, devID);

    if (deviceProp.computeMode == hipComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
        exit(EXIT_SUCCESS);
    }

    if (error != hipSuccess)
    {
        printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }

    // Size of square matrices
    size_t n = N;
    // printf("[-] N = ");
    // scanf("%u", &n);

    printf("MatrixA(%zu,%zu), MatrixB(%zu,%zu)\n", n, n, n, n);

    int matrix_result = matrixMultiply(argc, argv, n);

    exit(matrix_result);
}
8322ff50577a92f2fb3382f4dabeefb8fda6c950.cu
// System includes #include <stdio.h> #include <stdlib.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> #include <device_launch_parameters.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B */ #define BLOCK_SIZE 16 #define N 2048 __global__ void matrixMulCUDA(float *C, float *A, float *B, int n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float C_val = 0; for (int k = 0; k < n; ++k) { float A_elem = A[row * n + k]; float B_elem = B[k * n + col]; C_val += A_elem * B_elem; } C[row * n + col] = C_val; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int n) { // Allocate host memory for matrices A and B unsigned int size_A = n * n; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = n * n; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C unsigned int mem_size_C = n * n * sizeof(float); float *h_C = (float *)malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; error = cudaMalloc((void **)&d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **)&d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **)&d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 grid((n-1) / BLOCK_SIZE + 1, (n-1) / BLOCK_SIZE + 1, 1); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel matrixMulCUDA<<<grid, threads>>>(d_C, d_A, d_B, n); 
error = cudaGetLastError();

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Record the stop event
    error = cudaEventRecord(stop, NULL);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Wait for the stop event to complete
    error = cudaEventSynchronize(stop);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Elapsed time in msec = %f\n", msecTotal);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Copy result from device to host
    error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }

    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    return EXIT_SUCCESS;
}

/**
 * Program main
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");

    // By default, we use device 0
    int devID = 0;
    cudaSetDevice(devID);

    cudaError_t error;
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);

    if (error != cudaSuccess)
    {
        printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    }

    error = cudaGetDeviceProperties(&deviceProp, devID);

    if (deviceProp.computeMode == cudaComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
        exit(EXIT_SUCCESS);
    }

    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }

    // Size of square matrices
    size_t n = N;
    // printf("[-] N = ");
    // scanf("%u", &n);

    printf("MatrixA(%zu,%zu), MatrixB(%zu,%zu)\n", n, n, n, n);

    int matrix_result = matrixMultiply(argc, argv, n);

    exit(matrix_result);
}
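The matrixMulCUDA kernel above reads every element of A and B from global memory n times. A shared-memory tiled version is the usual next step; the sketch below is illustrative rather than part of the benchmark, assumes n is a multiple of BLOCK_SIZE (true for the default N = 2048 with BLOCK_SIZE = 16), and can be launched with the same grid and threads dimensions used above.

// Tiled matrix multiply: each block stages BLOCK_SIZE x BLOCK_SIZE tiles of A
// and B in shared memory and reuses them BLOCK_SIZE times.
__global__ void matrixMulTiled(float *C, float *A, float *B, int n)
{
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    float C_val = 0.0f;

    for (int t = 0; t < n / BLOCK_SIZE; ++t)
    {
        // Each thread loads one element of the current A tile and B tile.
        As[threadIdx.y][threadIdx.x] = A[row * n + t * BLOCK_SIZE + threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] = B[(t * BLOCK_SIZE + threadIdx.y) * n + col];
        __syncthreads();

        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            C_val += As[threadIdx.y][k] * Bs[k][threadIdx.x];
        }
        __syncthreads();
    }

    C[row * n + col] = C_val;
}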
8b21298e37da6b4fce48debfab22f8f567ab3c73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "compute_scores.cuh" #include <catboost/cuda/methods/kernel/score_calcers.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> #include <library/cuda/wrappers/arch.cuh> #include <contrib/libs/cub/hipcub/hipcub.hpp> #include <cmath> #include <exception> #include <cfloat> namespace NKernel { #define ARGMAX() \ __shared__ float scores[BlockSize]; \ scores[tid] = bestScore; \ __shared__ int indices[BlockSize]; \ indices[tid] = bestIndex; \ __syncthreads();\ for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { \ if (tid < s) { \ if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { \ scores[tid] = scores[tid + s]; \ indices[tid] = indices[tid + s]; \ }\ }\ __syncthreads();\ } \ if (!tid) { \ const int index = indices[0];\ if (index != -1 && index < binFeatureCount) { \ result->FeatureId = bf[index].FeatureId;\ result->BinId = bf[index].BinId;\ result->Score = scores[0];\ } else {\ result->FeatureId = -1;\ result->BinId = -1;\ result->Score = FLT_MAX;\ }\ } // histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex; template <int BlockSize, class TScoreCalcer> __global__ void ComputeOptimalSplits(const TCBinFeature* bf, ui32 binFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, int pCount, const ui32* restPartIds, int restPartCount, bool multiclassOptimization, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = -1; int tid = threadIdx.x; result += blockIdx.x + blockIdx.y * gridDim.x; partIds += blockIdx.y * pCount; for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) { const int binFeatureId = offset + tid; if (binFeatureId >= binFeatureCount) { break; } calcer.NextFeature(bf[binFeatureId]); for (int i = 0; i < pCount; i++) { const int leafId = __ldg(partIds + i); const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f); const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f); double totalSumLeft = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId); double partStat = __ldg(partStats + leafId * statCount + statId); totalSumPart += partStat; float sumRight = static_cast<float>(partStat - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); totalSumLeft += sumLeft; } if (multiclassOptimization) { double totalSumRight = totalSumPart - totalSumLeft; calcer.AddLeaf(-totalSumLeft, weightLeft); calcer.AddLeaf(-totalSumRight, weightRight); } } //add fixed leaves for (int i = 0; i < restPartCount; i++) { const int leafId = __ldg(restPartIds + i); const float weight = max(__ldg(partStats + leafId * statCount), 0.0f); double totalSum = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { double sum = __ldg(partStats + leafId * statCount + statId); totalSumPart += sum; calcer.AddLeaf(sum, weight); totalSum += sum; } if (multiclassOptimization) { calcer.AddLeaf(-totalSum, weight); } } const float score = calcer.GetScore(); if 
(score < bestScore) { bestScore = score; bestIndex = binFeatureId; } } ARGMAX() } void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, int partBlockSize, int partBlockCount, const ui32* restPartIds, int restPartCount, TBestSplitProperties* result, ui32 argmaxBlockCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; dim3 numBlocks; numBlocks.x = argmaxBlockCount; numBlocks.y = partBlockCount; numBlocks.z = 1; #define RUN() \ ComputeOptimalSplits<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, partBlockSize, restPartIds, restPartCount, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Cosine: case EScoreFunction::NewtonCosine: { using TScoreCalcer = TCosineScoreCalcer; TCosineScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } template <int BlockSize> __global__ void ComputeTargetVarianceImpl(const float* stats, ui32 size, ui32 statCount, ui64 statLineSize, bool isMulticlass, double* aggregatedStats) { ui32 i = BlockSize * blockIdx.x + threadIdx.x; float weightedSum = 0; float weightedSum2 = 0; float totalWeight = 0; while (i < size) { const float w = stats[i]; if (w > 1e-15f) { float statSum = 0; for (ui32 statId = 1; statId < statCount; ++statId) { const float wt = stats[i + statLineSize * statId]; weightedSum += wt; weightedSum2 += wt * wt / w; //cause we need sum w * t * t statSum += wt; } if (isMulticlass) { weightedSum += -statSum; weightedSum2 += statSum * statSum / w; } totalWeight += w; } i += gridDim.x * BlockSize; } using BlockReduce = typename hipcub::BlockReduce<double, BlockSize>; __shared__ typename BlockReduce::TempStorage tempStorage; double blockWeightedSum = weightedSum; blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum); double blockWeightedSum2 = weightedSum2; blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2); double blockTotalWeight = totalWeight; blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight); if (threadIdx.x == 0) { TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum); TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2); TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight); } } void ComputeTargetVariance(const float* stats, ui32 size, ui32 statCount, ui64 statLineSize, bool isMulticlass, double* aggregatedStats, TCudaStream stream) { const ui32 blockSize = 512; const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize)); FillBuffer(aggregatedStats, 0.0, 3, stream); if (numBlocks) { hipLaunchKernelGGL(( 
ComputeTargetVarianceImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, stats, size, statCount, statLineSize, isMulticlass, aggregatedStats); } } template <int BlockSize, class TScoreCalcer> __global__ void ComputeOptimalSplitsRegion(const TCBinFeature* bf, ui32 binFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, bool multiclassOptimization, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = -1; int tid = threadIdx.x; result += blockIdx.x + blockIdx.y * gridDim.x; partIds += blockIdx.y; const int thisPartId = partIds[0]; for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) { const int binFeatureId = offset + tid; if (binFeatureId >= binFeatureCount) { break; } calcer.NextFeature(bf[binFeatureId]); TScoreCalcer beforeSplitCalcer = calcer; const double partWeight = __ldg(partStats + thisPartId * statCount); const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f); const float weightRight = max(partWeight - weightLeft, 0.0f); bool toZeroPartSplit = false; if (weightLeft < 1e-20f || weightRight < 1e-20f) { toZeroPartSplit = true; } double totalSumLeft = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId); double partStat = __ldg(partStats + thisPartId * statCount + statId); totalSumPart += partStat; float sumRight = static_cast<float>(partStat - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); beforeSplitCalcer.AddLeaf(partStat, partWeight); totalSumLeft += sumLeft; } if (multiclassOptimization) { double totalSumRight = totalSumPart - totalSumLeft; calcer.AddLeaf(-totalSumLeft, weightLeft); calcer.AddLeaf(-totalSumRight, weightRight); beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight); } const bool skip = toZeroPartSplit; const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX; const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX; //-10 - 0 = -10 //in gpu catboost all scores are inverse, lower is better const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0; if (gain < bestScore) { bestScore = gain; bestIndex = binFeatureId; } } ARGMAX() } template <int BlockSize, class TScoreCalcer> __global__ void ComputeOptimalSplit(const TCBinFeature* bf, ui32 binFeatureCount, const float* histograms, const double* partStats, int statCount, const int partId, const int maybeSecondPartId, bool multiclassOptimization, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = -1; int tid = threadIdx.x; result += blockIdx.x + blockIdx.y * gridDim.x; const int thisPartId = blockIdx.y == 0 ? 
partId : maybeSecondPartId; for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) { const int binFeatureId = offset + tid; if (binFeatureId >= binFeatureCount) { break; } calcer.NextFeature(bf[binFeatureId]); TScoreCalcer beforeSplitCalcer = calcer; const double partWeight = __ldg(partStats + thisPartId * statCount); const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f); const float weightRight = max(partWeight - weightLeft, 0.0f); bool toZeroPartSplit = false; if (weightLeft < 1e-20f || weightRight < 1e-20f) { toZeroPartSplit = true; } double totalSumLeft = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId); double partStat = __ldg(partStats + thisPartId * statCount + statId); totalSumPart += partStat; float sumRight = static_cast<float>(partStat - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); beforeSplitCalcer.AddLeaf(partStat, partWeight); totalSumLeft += sumLeft; } if (multiclassOptimization) { double totalSumRight = totalSumPart - totalSumLeft; calcer.AddLeaf(-totalSumLeft, weightLeft); calcer.AddLeaf(-totalSumRight, weightRight); beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight); } const bool skip = toZeroPartSplit; const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX; const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX; //-10 - 0 = -10 //in gpu catboost all scores are inverse, lower is better const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0; if (gain < bestScore) { bestScore = gain; bestIndex = binFeatureId; } } ARGMAX() } void ComputeOptimalSplitsRegion(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, int partCount, TBestSplitProperties* result, ui32 argmaxBlockCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = argmaxBlockCount; numBlocks.y = partCount; numBlocks.z = 1; #define RUN() \ ComputeOptimalSplitsRegion<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Cosine: case EScoreFunction::NewtonCosine: { using TScoreCalcer = TCosineScoreCalcer; TCosineScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } void ComputeOptimalSplit(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount, const float* histograms, 
const double* partStats, int statCount, ui32 partId, ui32 maybeSecondPartId, TBestSplitProperties* result, ui32 argmaxBlockCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = argmaxBlockCount; numBlocks.y = partId == maybeSecondPartId ? 1 : 2; numBlocks.z = 1; #define RUN() \ ComputeOptimalSplit<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partId, maybeSecondPartId, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Cosine: case EScoreFunction::NewtonCosine: { using TScoreCalcer = TCosineScoreCalcer; TCosineScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } //seems like this'll be faster on CPU template <class TScoreCalcer> void ComputeTreeScoreImpl(const double* partStats, int statCount, const ui32* allPartIds, int allPartCount, bool multiclassOptimization, TScoreCalcer calcer, double* result) { calcer.NextFeature(TCBinFeature({100500, 42})); for (int i = 0; i < allPartCount; ++i) { const int leafId = allPartIds[i]; const double weight = max(partStats[leafId * statCount], 0.0); double totalSum = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { double sum = partStats[leafId * statCount + statId]; totalSumPart += sum; calcer.AddLeaf(sum, weight); totalSum += sum; } if (multiclassOptimization) { calcer.AddLeaf(-totalSum, weight); } } result[0] = calcer.GetScore(); } void ComputeTreeScore( const double* partStats, int statCount, const ui32* allPartIds, int allPartCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, double* result, TCudaStream) { #define RUN() \ ComputeTreeScoreImpl(partStats, statCount, allPartIds, allPartCount, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Cosine: case EScoreFunction::NewtonCosine: { using TScoreCalcer = TCosineScoreCalcer; TCosineScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } #undef ARGMAX }
8b21298e37da6b4fce48debfab22f8f567ab3c73.cu
#include "compute_scores.cuh" #include <catboost/cuda/methods/kernel/score_calcers.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> #include <library/cuda/wrappers/arch.cuh> #include <contrib/libs/cub/cub/block/block_reduce.cuh> #include <cmath> #include <exception> #include <cfloat> namespace NKernel { #define ARGMAX() \ __shared__ float scores[BlockSize]; \ scores[tid] = bestScore; \ __shared__ int indices[BlockSize]; \ indices[tid] = bestIndex; \ __syncthreads();\ for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { \ if (tid < s) { \ if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { \ scores[tid] = scores[tid + s]; \ indices[tid] = indices[tid + s]; \ }\ }\ __syncthreads();\ } \ if (!tid) { \ const int index = indices[0];\ if (index != -1 && index < binFeatureCount) { \ result->FeatureId = bf[index].FeatureId;\ result->BinId = bf[index].BinId;\ result->Score = scores[0];\ } else {\ result->FeatureId = -1;\ result->BinId = -1;\ result->Score = FLT_MAX;\ }\ } // histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex; template <int BlockSize, class TScoreCalcer> __global__ void ComputeOptimalSplits(const TCBinFeature* bf, ui32 binFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, int pCount, const ui32* restPartIds, int restPartCount, bool multiclassOptimization, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = -1; int tid = threadIdx.x; result += blockIdx.x + blockIdx.y * gridDim.x; partIds += blockIdx.y * pCount; for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) { const int binFeatureId = offset + tid; if (binFeatureId >= binFeatureCount) { break; } calcer.NextFeature(bf[binFeatureId]); for (int i = 0; i < pCount; i++) { const int leafId = __ldg(partIds + i); const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f); const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f); double totalSumLeft = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId); double partStat = __ldg(partStats + leafId * statCount + statId); totalSumPart += partStat; float sumRight = static_cast<float>(partStat - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); totalSumLeft += sumLeft; } if (multiclassOptimization) { double totalSumRight = totalSumPart - totalSumLeft; calcer.AddLeaf(-totalSumLeft, weightLeft); calcer.AddLeaf(-totalSumRight, weightRight); } } //add fixed leaves for (int i = 0; i < restPartCount; i++) { const int leafId = __ldg(restPartIds + i); const float weight = max(__ldg(partStats + leafId * statCount), 0.0f); double totalSum = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { double sum = __ldg(partStats + leafId * statCount + statId); totalSumPart += sum; calcer.AddLeaf(sum, weight); totalSum += sum; } if (multiclassOptimization) { calcer.AddLeaf(-totalSum, weight); } } const float score = calcer.GetScore(); if (score < bestScore) { bestScore = score; bestIndex = binFeatureId; } } ARGMAX() } 
void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, int partBlockSize, int partBlockCount, const ui32* restPartIds, int restPartCount, TBestSplitProperties* result, ui32 argmaxBlockCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; dim3 numBlocks; numBlocks.x = argmaxBlockCount; numBlocks.y = partBlockCount; numBlocks.z = 1; #define RUN() \ ComputeOptimalSplits<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, partBlockSize, restPartIds, restPartCount, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Cosine: case EScoreFunction::NewtonCosine: { using TScoreCalcer = TCosineScoreCalcer; TCosineScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } template <int BlockSize> __global__ void ComputeTargetVarianceImpl(const float* stats, ui32 size, ui32 statCount, ui64 statLineSize, bool isMulticlass, double* aggregatedStats) { ui32 i = BlockSize * blockIdx.x + threadIdx.x; float weightedSum = 0; float weightedSum2 = 0; float totalWeight = 0; while (i < size) { const float w = stats[i]; if (w > 1e-15f) { float statSum = 0; for (ui32 statId = 1; statId < statCount; ++statId) { const float wt = stats[i + statLineSize * statId]; weightedSum += wt; weightedSum2 += wt * wt / w; //cause we need sum w * t * t statSum += wt; } if (isMulticlass) { weightedSum += -statSum; weightedSum2 += statSum * statSum / w; } totalWeight += w; } i += gridDim.x * BlockSize; } using BlockReduce = typename cub::BlockReduce<double, BlockSize>; __shared__ typename BlockReduce::TempStorage tempStorage; double blockWeightedSum = weightedSum; blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum); double blockWeightedSum2 = weightedSum2; blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2); double blockTotalWeight = totalWeight; blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight); if (threadIdx.x == 0) { TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum); TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2); TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight); } } void ComputeTargetVariance(const float* stats, ui32 size, ui32 statCount, ui64 statLineSize, bool isMulticlass, double* aggregatedStats, TCudaStream stream) { const ui32 blockSize = 512; const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize)); FillBuffer(aggregatedStats, 0.0, 3, stream); if (numBlocks) { ComputeTargetVarianceImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(stats, size, statCount, statLineSize, isMulticlass, 
aggregatedStats); } } template <int BlockSize, class TScoreCalcer> __global__ void ComputeOptimalSplitsRegion(const TCBinFeature* bf, ui32 binFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, bool multiclassOptimization, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = -1; int tid = threadIdx.x; result += blockIdx.x + blockIdx.y * gridDim.x; partIds += blockIdx.y; const int thisPartId = partIds[0]; for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) { const int binFeatureId = offset + tid; if (binFeatureId >= binFeatureCount) { break; } calcer.NextFeature(bf[binFeatureId]); TScoreCalcer beforeSplitCalcer = calcer; const double partWeight = __ldg(partStats + thisPartId * statCount); const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f); const float weightRight = max(partWeight - weightLeft, 0.0f); bool toZeroPartSplit = false; if (weightLeft < 1e-20f || weightRight < 1e-20f) { toZeroPartSplit = true; } double totalSumLeft = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId); double partStat = __ldg(partStats + thisPartId * statCount + statId); totalSumPart += partStat; float sumRight = static_cast<float>(partStat - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); beforeSplitCalcer.AddLeaf(partStat, partWeight); totalSumLeft += sumLeft; } if (multiclassOptimization) { double totalSumRight = totalSumPart - totalSumLeft; calcer.AddLeaf(-totalSumLeft, weightLeft); calcer.AddLeaf(-totalSumRight, weightRight); beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight); } const bool skip = toZeroPartSplit; const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX; const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX; //-10 - 0 = -10 //in gpu catboost all scores are inverse, lower is better const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0; if (gain < bestScore) { bestScore = gain; bestIndex = binFeatureId; } } ARGMAX() } template <int BlockSize, class TScoreCalcer> __global__ void ComputeOptimalSplit(const TCBinFeature* bf, ui32 binFeatureCount, const float* histograms, const double* partStats, int statCount, const int partId, const int maybeSecondPartId, bool multiclassOptimization, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = -1; int tid = threadIdx.x; result += blockIdx.x + blockIdx.y * gridDim.x; const int thisPartId = blockIdx.y == 0 ? 
partId : maybeSecondPartId; for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) { const int binFeatureId = offset + tid; if (binFeatureId >= binFeatureCount) { break; } calcer.NextFeature(bf[binFeatureId]); TScoreCalcer beforeSplitCalcer = calcer; const double partWeight = __ldg(partStats + thisPartId * statCount); const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f); const float weightRight = max(partWeight - weightLeft, 0.0f); bool toZeroPartSplit = false; if (weightLeft < 1e-20f || weightRight < 1e-20f) { toZeroPartSplit = true; } double totalSumLeft = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId); double partStat = __ldg(partStats + thisPartId * statCount + statId); totalSumPart += partStat; float sumRight = static_cast<float>(partStat - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); beforeSplitCalcer.AddLeaf(partStat, partWeight); totalSumLeft += sumLeft; } if (multiclassOptimization) { double totalSumRight = totalSumPart - totalSumLeft; calcer.AddLeaf(-totalSumLeft, weightLeft); calcer.AddLeaf(-totalSumRight, weightRight); beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight); } const bool skip = toZeroPartSplit; const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX; const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX; //-10 - 0 = -10 //in gpu catboost all scores are inverse, lower is better const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0; if (gain < bestScore) { bestScore = gain; bestIndex = binFeatureId; } } ARGMAX() } void ComputeOptimalSplitsRegion(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount, const float* histograms, const double* partStats, int statCount, const ui32* partIds, int partCount, TBestSplitProperties* result, ui32 argmaxBlockCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = argmaxBlockCount; numBlocks.y = partCount; numBlocks.z = 1; #define RUN() \ ComputeOptimalSplitsRegion<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Cosine: case EScoreFunction::NewtonCosine: { using TScoreCalcer = TCosineScoreCalcer; TCosineScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } void ComputeOptimalSplit(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount, const float* histograms, 
const double* partStats, int statCount, ui32 partId, ui32 maybeSecondPartId, TBestSplitProperties* result, ui32 argmaxBlockCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = argmaxBlockCount; numBlocks.y = partId == maybeSecondPartId ? 1 : 2; numBlocks.z = 1; #define RUN() \ ComputeOptimalSplit<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partId, maybeSecondPartId, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Cosine: case EScoreFunction::NewtonCosine: { using TScoreCalcer = TCosineScoreCalcer; TCosineScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } //seems like this'll be faster on CPU template <class TScoreCalcer> void ComputeTreeScoreImpl(const double* partStats, int statCount, const ui32* allPartIds, int allPartCount, bool multiclassOptimization, TScoreCalcer calcer, double* result) { calcer.NextFeature(TCBinFeature({100500, 42})); for (int i = 0; i < allPartCount; ++i) { const int leafId = allPartIds[i]; const double weight = max(partStats[leafId * statCount], 0.0); double totalSum = 0; double totalSumPart = 0; for (int statId = 1; statId < statCount; ++statId) { double sum = partStats[leafId * statCount + statId]; totalSumPart += sum; calcer.AddLeaf(sum, weight); totalSum += sum; } if (multiclassOptimization) { calcer.AddLeaf(-totalSum, weight); } } result[0] = calcer.GetScore(); } void ComputeTreeScore( const double* partStats, int statCount, const ui32* allPartIds, int allPartCount, EScoreFunction scoreFunction, bool multiclassOptimization, double l2, bool normalize, double scoreStdDev, ui64 seed, double* result, TCudaStream) { #define RUN() \ ComputeTreeScoreImpl(partStats, statCount, allPartIds, allPartCount, multiclassOptimization, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Cosine: case EScoreFunction::NewtonCosine: { using TScoreCalcer = TCosineScoreCalcer; TCosineScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } #undef ARGMAX }
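The ARGMAX() macro used by the kernels above performs a block-wide reduction that keeps the smallest score and, on ties, the smallest bin-feature index. Written out as a stand-alone device helper it looks roughly like the sketch below; the helper name and signature are invented for illustration, and it assumes blockDim.x == BlockSize with BlockSize a power of two.

// Block-wide argmin with "smallest index wins on ties", mirroring ARGMAX().
template <int BlockSize>
__device__ void BlockArgMin(float score, int index, float* outScore, int* outIndex)
{
    __shared__ float scores[BlockSize];
    __shared__ int indices[BlockSize];
    const int tid = threadIdx.x;
    scores[tid] = score;
    indices[tid] = index;
    __syncthreads();
    for (int s = BlockSize >> 1; s > 0; s >>= 1) {
        if (tid < s) {
            if (scores[tid] > scores[tid + s] ||
                (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s])) {
                scores[tid] = scores[tid + s];
                indices[tid] = indices[tid + s];
            }
        }
        __syncthreads();
    }
    if (tid == 0) {
        *outScore = scores[0];   // best (lowest) score in the block
        *outIndex = indices[0];  // its bin-feature index
    }
}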
e6662018e76792eaf10a17789de63ced8206eb4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "radonortho.cuh" #include "kernels_hip.cuh" #include <stdio.h> radonortho::radonortho(size_t ntheta, size_t n, size_t nz) : ntheta(ntheta), n(n), nz(nz) { // arrays allocation on GPU hipMalloc((void **)&fx, n * nz * sizeof(float)); hipMalloc((void **)&fy, n * nz * sizeof(float)); hipMalloc((void **)&fz, n * n * sizeof(float)); hipMalloc((void **)&g, n * ntheta * nz * sizeof(float)); hipMalloc((void **)&fg, (n / 2 + 1) * ntheta * nz * sizeof(float2)); hipMalloc((void **)&filter, (n / 2 + 1) * sizeof(float)); hipMalloc((void **)&theta, ntheta * sizeof(float)); hipMemset(fx,0,n*nz*sizeof(float)); hipMemset(fy,0,n*nz*sizeof(float)); hipMemset(fz,0,n*n*sizeof(float)); //fft plans for filtering int ffts[] = {n}; int idist = n; int odist = n / 2 + 1; int inembed[] = {n}; int onembed[] = {n / 2 + 1}; hipfftPlanMany(&plan_forward, 1, ffts, inembed, 1, idist, onembed, 1, odist, HIPFFT_R2C, ntheta * nz); hipfftPlanMany(&plan_inverse, 1, ffts, onembed, 1, odist, inembed, 1, idist, HIPFFT_C2R, ntheta * nz); //init thread blocks and block grids BS3d.x = 32; BS3d.y = 32; BS3d.z = 1; GS3d1.x = ceil(n / (float)BS3d.x); GS3d1.y = ceil(ntheta / (float)BS3d.y); GS3d1.z = ceil(nz / (float)BS3d.z); GS3d2.x = ceil(n / (float)BS3d.x); GS3d2.y = ceil(n / (float)BS3d.y); GS3d3.x = ceil(n / (float)BS3d.x); GS3d3.y = ceil(nz / (float)BS3d.y); is_free = false; } // destructor, memory deallocation radonortho::~radonortho() { free(); } void radonortho::free() { if (!is_free) { hipFree(g); hipFree(fg); hipFree(fx); hipFree(fy); hipFree(fz); hipFree(filter); hipFree(theta); hipfftDestroy(plan_forward); hipfftDestroy(plan_inverse); is_free = true; } } void radonortho::rec(size_t fx_,size_t fy_,size_t fz_, size_t g_, size_t theta_, float center, int ix, int iy, int iz, int flgx, int flgy, int flgz) { // copy data and angles to GPU hipMemcpy(g, (float *)g_, n * ntheta * nz * sizeof(float), hipMemcpyDefault); hipMemcpy(theta, (float *)theta_, ntheta * sizeof(float), hipMemcpyDefault); // fft for filtering in the frequency domain hipfftExecR2C(plan_forward, (hipfftReal *)g, (hipfftComplex *)fg); // parzen filtering hipLaunchKernelGGL(( applyfilter), dim3(GS3d1), dim3(BS3d), 0, 0, fg, filter, n, ntheta, nz); // fft back hipfftExecC2R(plan_inverse, (hipfftComplex *)fg, (hipfftReal *)g); //hipMemcpy((float *)g_, g, n * ntheta * nz * sizeof(float), hipMemcpyDefault); if(flgx) hipMemset(fx,0,n*nz*sizeof(float)); if(flgy) hipMemset(fy,0,n*nz*sizeof(float)); if(flgz) hipMemset(fz,0,n*n*sizeof(float)); // reconstruct slices via summation over lines hipLaunchKernelGGL(( ortho_kerx), dim3(GS3d3), dim3(BS3d), 0, 0, fx, g, theta, center, ix, n, ntheta, nz); hipLaunchKernelGGL(( ortho_kery), dim3(GS3d3), dim3(BS3d), 0, 0, fy, g, theta, center, iy, n, ntheta, nz); hipLaunchKernelGGL(( ortho_kerz), dim3(GS3d2), dim3(BS3d), 0, 0, fz, g, theta, center, iz, n, ntheta, nz); //copy result to cpu hipMemcpy((float *)fx_, fx, n * nz * sizeof(float), hipMemcpyDefault); hipMemcpy((float *)fy_, fy, n * nz * sizeof(float), hipMemcpyDefault); hipMemcpy((float *)fz_, fz, n * n * sizeof(float), hipMemcpyDefault); } void radonortho::set_filter(size_t filter_) { hipMemcpy(filter, (float*) filter_, (n/2+1)*sizeof(float),hipMemcpyDefault); }
e6662018e76792eaf10a17789de63ced8206eb4c.cu
#include "radonortho.cuh" #include "kernels.cuh" #include <stdio.h> radonortho::radonortho(size_t ntheta, size_t n, size_t nz) : ntheta(ntheta), n(n), nz(nz) { // arrays allocation on GPU cudaMalloc((void **)&fx, n * nz * sizeof(float)); cudaMalloc((void **)&fy, n * nz * sizeof(float)); cudaMalloc((void **)&fz, n * n * sizeof(float)); cudaMalloc((void **)&g, n * ntheta * nz * sizeof(float)); cudaMalloc((void **)&fg, (n / 2 + 1) * ntheta * nz * sizeof(float2)); cudaMalloc((void **)&filter, (n / 2 + 1) * sizeof(float)); cudaMalloc((void **)&theta, ntheta * sizeof(float)); cudaMemset(fx,0,n*nz*sizeof(float)); cudaMemset(fy,0,n*nz*sizeof(float)); cudaMemset(fz,0,n*n*sizeof(float)); //fft plans for filtering int ffts[] = {n}; int idist = n; int odist = n / 2 + 1; int inembed[] = {n}; int onembed[] = {n / 2 + 1}; cufftPlanMany(&plan_forward, 1, ffts, inembed, 1, idist, onembed, 1, odist, CUFFT_R2C, ntheta * nz); cufftPlanMany(&plan_inverse, 1, ffts, onembed, 1, odist, inembed, 1, idist, CUFFT_C2R, ntheta * nz); //init thread blocks and block grids BS3d.x = 32; BS3d.y = 32; BS3d.z = 1; GS3d1.x = ceil(n / (float)BS3d.x); GS3d1.y = ceil(ntheta / (float)BS3d.y); GS3d1.z = ceil(nz / (float)BS3d.z); GS3d2.x = ceil(n / (float)BS3d.x); GS3d2.y = ceil(n / (float)BS3d.y); GS3d3.x = ceil(n / (float)BS3d.x); GS3d3.y = ceil(nz / (float)BS3d.y); is_free = false; } // destructor, memory deallocation radonortho::~radonortho() { free(); } void radonortho::free() { if (!is_free) { cudaFree(g); cudaFree(fg); cudaFree(fx); cudaFree(fy); cudaFree(fz); cudaFree(filter); cudaFree(theta); cufftDestroy(plan_forward); cufftDestroy(plan_inverse); is_free = true; } } void radonortho::rec(size_t fx_,size_t fy_,size_t fz_, size_t g_, size_t theta_, float center, int ix, int iy, int iz, int flgx, int flgy, int flgz) { // copy data and angles to GPU cudaMemcpy(g, (float *)g_, n * ntheta * nz * sizeof(float), cudaMemcpyDefault); cudaMemcpy(theta, (float *)theta_, ntheta * sizeof(float), cudaMemcpyDefault); // fft for filtering in the frequency domain cufftExecR2C(plan_forward, (cufftReal *)g, (cufftComplex *)fg); // parzen filtering applyfilter<<<GS3d1, BS3d>>>(fg, filter, n, ntheta, nz); // fft back cufftExecC2R(plan_inverse, (cufftComplex *)fg, (cufftReal *)g); //cudaMemcpy((float *)g_, g, n * ntheta * nz * sizeof(float), cudaMemcpyDefault); if(flgx) cudaMemset(fx,0,n*nz*sizeof(float)); if(flgy) cudaMemset(fy,0,n*nz*sizeof(float)); if(flgz) cudaMemset(fz,0,n*n*sizeof(float)); // reconstruct slices via summation over lines ortho_kerx<<<GS3d3, BS3d>>>(fx, g, theta, center, ix, n, ntheta, nz); ortho_kery<<<GS3d3, BS3d>>>(fy, g, theta, center, iy, n, ntheta, nz); ortho_kerz<<<GS3d2, BS3d>>>(fz, g, theta, center, iz, n, ntheta, nz); //copy result to cpu cudaMemcpy((float *)fx_, fx, n * nz * sizeof(float), cudaMemcpyDefault); cudaMemcpy((float *)fy_, fy, n * nz * sizeof(float), cudaMemcpyDefault); cudaMemcpy((float *)fz_, fz, n * n * sizeof(float), cudaMemcpyDefault); } void radonortho::set_filter(size_t filter_) { cudaMemcpy(filter, (float*) filter_, (n/2+1)*sizeof(float),cudaMemcpyDefault); }
5a649aff8fe7d5aaadcfac73461152ec76523090.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, they use that to //calculate a 1D offset int y = threadIdx.y+ blockIdx.y* blockDim.y; int x = threadIdx.x+ blockIdx.x* blockDim.x; if (y < numCols && x < numRows) { int index = numRows*y +x; uchar4 color = rgbaImage[index]; unsigned char grey = (unsigned char)(0.299f*color.x+ 0.587f*color.y + 0.114f*color.z); greyImage[index] = grey; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int blockWidth = 32; const dim3 blockSize(blockWidth, blockWidth, 1); int blocksX = numRows/blockWidth+1; int blocksY = numCols/blockWidth+1; //TODO const dim3 gridSize( blocksX, blocksY, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
5a649aff8fe7d5aaadcfac73461152ec76523090.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, they use that to //calculate a 1D offset int y = threadIdx.y+ blockIdx.y* blockDim.y; int x = threadIdx.x+ blockIdx.x* blockDim.x; if (y < numCols && x < numRows) { int index = numRows*y +x; uchar4 color = rgbaImage[index]; unsigned char grey = (unsigned char)(0.299f*color.x+ 0.587f*color.y + 0.114f*color.z); greyImage[index] = grey; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int blockWidth = 32; const dim3 blockSize(blockWidth, blockWidth, 1); int blocksX = numRows/blockWidth+1; int blocksY = numCols/blockWidth+1; //TODO const dim3 gridSize( blocksX, blocksY, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
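In rgba_to_greyscale the guard is `y < numCols && x < numRows` while the flat offset is `numRows*y + x`, so x walks rows and y walks columns but the stride is the number of rows. Whether that matches the caller depends on how the image buffer is laid out; for the usual row-major layout of this assignment (pixel (row, col) stored at row * numCols + col) the mapping is normally written the other way around. A sketch assuming that layout, with the grid then sized as ((numCols + blockWidth - 1)/blockWidth, (numRows + blockWidth - 1)/blockWidth, 1):

__global__ void rgba_to_greyscale_rowmajor(const uchar4* const rgbaImage,
                                           unsigned char* const greyImage,
                                           int numRows, int numCols) {
    // x walks columns, y walks rows of a row-major image.
    int c = blockIdx.x * blockDim.x + threadIdx.x;   // column
    int r = blockIdx.y * blockDim.y + threadIdx.y;   // row
    if (r < numRows && c < numCols) {
        int index = r * numCols + c;                 // row-major flat offset
        uchar4 px = rgbaImage[index];
        greyImage[index] = (unsigned char)(0.299f * px.x + 0.587f * px.y + 0.114f * px.z);
    }
}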
f667febc7458d34f84ffbc605d808859ffe0bc63.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <iostream> #include "cuda_util.h" #include <iostream> #include <cassert> #include <chrono> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "Matrix.h" typedef std::chrono::time_point<std::chrono::high_resolution_clock> tpoint; __global__ void matrixMultiplication(float* matrix1, float* matrix2, float* result, int m1_cols, int m1_rows, int m2_cols, int m2_rows) { if (m1_cols != m2_rows) { throw std::length_error("False Matrix size! Can't mulitply."); } int res_X = blockDim.x * blockIdx.x + threadIdx.x; int res_Y = blockDim.y * blockIdx.y + threadIdx.y; int offset = threadIdx.x % m2_cols; for (int k = 0; k < m1_cols; ++k) { result[threadIdx.x] += matrix1[res_X + k] * matrix2[offset + m2_rows * k]; } } bool initDevice(int& device_handle, int& max_threads_per_block) { int deviceCount = 0; checkErrorsCuda(hipGetDeviceCount(&deviceCount)); if (0 == deviceCount) { std::cerr << "initDevice() : No CUDA device found." << std::endl; return false; } // one could implement more complex logic here to find the fastest device if (deviceCount > 1) { std::cerr << "initDevice() : Multiple CUDA devices found. Using first one." << std::endl; } // set the device checkErrorsCuda(hipSetDevice(device_handle)); hipDeviceProp_t device_props; checkErrorsCuda(hipGetDeviceProperties(&device_props, device_handle)); max_threads_per_block = device_props.maxThreadsPerBlock; return true; } int main (int /*argc*/, char** /*argv*/) { int i = 3, j = 3, k = 3; Matrix<float> matrix1_host(i, j); Matrix<float> matrix2_host(j, k); Matrix<float> result_host(matrix1_host.getRows(), matrix2_host.getCols()); matrix1_host.fillMatrix(); matrix1_host.printMatrix(); matrix2_host.fillMatrix(); matrix2_host.printMatrix(); // check execution environment int device_handle = 0; int max_threads_per_block = 0; if (!initDevice(device_handle, max_threads_per_block)) { return EXIT_FAILURE; } // initialize memory float* result_device = nullptr; float* matrix1_device = nullptr; float* matrix2_device = nullptr; // allocate device memory checkErrorsCuda(hipMalloc((void **)&result_device, sizeof(float) * result_host.getTotalSize())); checkErrorsCuda(hipMalloc((void **)&matrix1_device, sizeof(float) * matrix1_host.getTotalSize())); checkErrorsCuda(hipMalloc((void **)&matrix2_device, sizeof(float) * matrix2_host.getTotalSize())); // copy device memory checkErrorsCuda(hipMemcpy((void*)matrix1_device, &matrix1_host.m_ptValues, sizeof(float) * matrix1_host.getTotalSize(), hipMemcpyHostToDevice)); checkErrorsCuda(hipMemcpy((void*)matrix2_device, &matrix2_host.m_ptValues, sizeof(float) * matrix2_host.getTotalSize(), hipMemcpyHostToDevice)); // determine thread layout dim3 num_threads_per_block(1, 1, 1); dim3 num_blocks(1, 1, 1); int max_threads_per_block_sqrt = (int)std::sqrt((double)max_threads_per_block); assert(32 == max_threads_per_block_sqrt); num_blocks.x = result_host.getCols() / max_threads_per_block_sqrt; if (0 != result_host.getCols() % max_threads_per_block_sqrt) { num_blocks.x++; } num_blocks.y = result_host.getRows() / max_threads_per_block_sqrt; if (0 != result_host.getRows() % max_threads_per_block_sqrt) { num_blocks.y++; } num_threads_per_block.x = max_threads_per_block_sqrt; num_threads_per_block.y = max_threads_per_block_sqrt; // run kernel tpoint t_start = std::chrono::high_resolution_clock::now(); //convSeparable<kernel_supp_half> << < num_blocks, num_threads_per_block >> >(kernel_device, image_device, image_conv_device, image.n_rows); 
matrixMultiplication << <num_blocks, num_threads_per_block >> > (matrix1_device, matrix2_device, result_device, matrix1_host.getCols(), matrix1_host.getRows(), matrix2_host.getCols(), matrix2_host.getRows()); tpoint t_end = std::chrono::high_resolution_clock::now(); double wall_clock = std::chrono::duration<double, std::milli>(t_end - t_start).count(); std::cerr << "Execution time: " << wall_clock << " ms." << std::endl; checkLastCudaError("Kernel execution failed"); hipDeviceSynchronize(); // copy result back to host checkErrorsCuda(hipMemcpy(&result_host.m_ptValues, result_device, sizeof(float) * result_host.getTotalSize(), hipMemcpyDeviceToHost)); }
f667febc7458d34f84ffbc605d808859ffe0bc63.cu
#include <cstdlib> #include <iostream> #include "cuda_util.h" #include <iostream> #include <cassert> #include <chrono> #include <cuda_runtime.h> #include <cuda.h> #include "Matrix.h" typedef std::chrono::time_point<std::chrono::high_resolution_clock> tpoint; __global__ void matrixMultiplication(float* matrix1, float* matrix2, float* result, int m1_cols, int m1_rows, int m2_cols, int m2_rows) { if (m1_cols != m2_rows) { throw std::length_error("False Matrix size! Can't mulitply."); } int res_X = blockDim.x * blockIdx.x + threadIdx.x; int res_Y = blockDim.y * blockIdx.y + threadIdx.y; int offset = threadIdx.x % m2_cols; for (int k = 0; k < m1_cols; ++k) { result[threadIdx.x] += matrix1[res_X + k] * matrix2[offset + m2_rows * k]; } } bool initDevice(int& device_handle, int& max_threads_per_block) { int deviceCount = 0; checkErrorsCuda(cudaGetDeviceCount(&deviceCount)); if (0 == deviceCount) { std::cerr << "initDevice() : No CUDA device found." << std::endl; return false; } // one could implement more complex logic here to find the fastest device if (deviceCount > 1) { std::cerr << "initDevice() : Multiple CUDA devices found. Using first one." << std::endl; } // set the device checkErrorsCuda(cudaSetDevice(device_handle)); cudaDeviceProp device_props; checkErrorsCuda(cudaGetDeviceProperties(&device_props, device_handle)); max_threads_per_block = device_props.maxThreadsPerBlock; return true; } int main (int /*argc*/, char** /*argv*/) { int i = 3, j = 3, k = 3; Matrix<float> matrix1_host(i, j); Matrix<float> matrix2_host(j, k); Matrix<float> result_host(matrix1_host.getRows(), matrix2_host.getCols()); matrix1_host.fillMatrix(); matrix1_host.printMatrix(); matrix2_host.fillMatrix(); matrix2_host.printMatrix(); // check execution environment int device_handle = 0; int max_threads_per_block = 0; if (!initDevice(device_handle, max_threads_per_block)) { return EXIT_FAILURE; } // initialize memory float* result_device = nullptr; float* matrix1_device = nullptr; float* matrix2_device = nullptr; // allocate device memory checkErrorsCuda(cudaMalloc((void **)&result_device, sizeof(float) * result_host.getTotalSize())); checkErrorsCuda(cudaMalloc((void **)&matrix1_device, sizeof(float) * matrix1_host.getTotalSize())); checkErrorsCuda(cudaMalloc((void **)&matrix2_device, sizeof(float) * matrix2_host.getTotalSize())); // copy device memory checkErrorsCuda(cudaMemcpy((void*)matrix1_device, &matrix1_host.m_ptValues, sizeof(float) * matrix1_host.getTotalSize(), cudaMemcpyHostToDevice)); checkErrorsCuda(cudaMemcpy((void*)matrix2_device, &matrix2_host.m_ptValues, sizeof(float) * matrix2_host.getTotalSize(), cudaMemcpyHostToDevice)); // determine thread layout dim3 num_threads_per_block(1, 1, 1); dim3 num_blocks(1, 1, 1); int max_threads_per_block_sqrt = (int)std::sqrt((double)max_threads_per_block); assert(32 == max_threads_per_block_sqrt); num_blocks.x = result_host.getCols() / max_threads_per_block_sqrt; if (0 != result_host.getCols() % max_threads_per_block_sqrt) { num_blocks.x++; } num_blocks.y = result_host.getRows() / max_threads_per_block_sqrt; if (0 != result_host.getRows() % max_threads_per_block_sqrt) { num_blocks.y++; } num_threads_per_block.x = max_threads_per_block_sqrt; num_threads_per_block.y = max_threads_per_block_sqrt; // run kernel tpoint t_start = std::chrono::high_resolution_clock::now(); //convSeparable<kernel_supp_half> << < num_blocks, num_threads_per_block >> >(kernel_device, image_device, image_conv_device, image.n_rows); matrixMultiplication << <num_blocks, num_threads_per_block >> > 
(matrix1_device, matrix2_device, result_device, matrix1_host.getCols(), matrix1_host.getRows(), matrix2_host.getCols(), matrix2_host.getRows()); tpoint t_end = std::chrono::high_resolution_clock::now(); double wall_clock = std::chrono::duration<double, std::milli>(t_end - t_start).count(); std::cerr << "Execution time: " << wall_clock << " ms." << std::endl; checkLastCudaError("Kernel execution failed"); cudaDeviceSynchronize(); // copy result back to host checkErrorsCuda(cudaMemcpy(&result_host.m_ptValues, result_device, sizeof(float) * result_host.getTotalSize(), cudaMemcpyDeviceToHost)); }
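Two points in matrixMultiplication are worth flagging: C++ exceptions cannot be thrown from a __global__ function, so the dimension check with std::length_error belongs on the host, and the output index uses only threadIdx.x, which covers a single block at most. Also, if m_ptValues is a pointer member, cudaMemcpy(..., &matrix1_host.m_ptValues, ...) copies the pointer value itself rather than the elements it points to. A minimal naive kernel sketch, assuming row-major storage (element (r, c) of an R x C matrix at r * C + c):

__global__ void matMulNaive(const float* A, const float* B, float* C,
                            int aRows, int aCols, int bCols) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < aRows && col < bCols) {
        float acc = 0.0f;
        // Dot product of row `row` of A with column `col` of B.
        for (int k = 0; k < aCols; ++k) {
            acc += A[row * aCols + k] * B[k * bCols + col];
        }
        C[row * bCols + col] = acc;
    }
}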
68c014ca3d438896187ad35a8f76f4e3ecab5cd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" using namespace std; /* * Implements a REAL modulo function **/ char mod(char x, char N) { // Source: https://stackoverflow.com/questions/11720656/modulo-operation-with-negative-numbers return (x % N + N) %N; } /* * Implements a REAL modulo function but on the GPU **/ __device__ char cudaMod(char x, char N) { // Source: https://stackoverflow.com/questions/11720656/modulo-operation-with-negative-numbers return (x % N + N) %N; } /** * Given a pointer to an integer array and the argv array, compute * the encryption key list into the int array and return how many * keys are given. */ int encryption_key_parse(char **argv, int **int_array) { int i = 0; /* Some loops to check if the brackets/comma's are present */ while(1) { if(argv[3][i] == '\0') break; i++; } char list[i]; for(int j = 0; j <= i; j++) { list[j] = argv[3][j]; } if(list[0] != '[' || list[i - 1] != ']') { printf("Error: Forget the brackets ([ or ]) \n"); return 0; } char list2[i - 2]; char check; int num_keys = 1; for(int j = 0; j <= (i - 3); j++) { check = list[j+1]; list2[j] = check; if(check == ',') { num_keys++; } } /* Malloc the keys for use in the main function */ int *keys; keys = (int*) malloc(sizeof(int)*num_keys); char *pointer; int keycounter = 0; pointer = strtok(list2, ","); /* Go through the string */ while(pointer != NULL) { int num = atoi(pointer); keys[keycounter] = num; keycounter++; /* Skip next comma */ pointer = strtok(NULL, ","); } *int_array = keys; return num_keys; } /* Utility function, use to do error checking. Use this function like this: checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(hipGetLastError()); */ static void checkCudaCall(hipError_t result) { if (result != hipSuccess) { cerr << "cuda error: " << hipGetErrorString(result) << endl; exit(1); } } __global__ void encryptKernel(char* deviceDataIn, char* deviceDataOut, int key_length, char *deviceKey) { unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = cudaMod(deviceDataIn[index] - ' ' + deviceKey[0], 95) + ' '; } __global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut, int key_length, char *deviceKey) { unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = cudaMod(deviceDataIn[index] - ' ' - deviceKey[0], 95) + ' '; } int fileSize() { int size; ifstream file ("original.data", ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.close(); } else { cout << "Unable to open file"; size = -1; } return size; } int readData(char *fileName, char *data) { streampos size; ifstream file (fileName, ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.seekg (0, ios::beg); file.read (data, size); file.close(); cout << "The entire file content is in memory." << endl; } else cout << "Unable to open file" << endl; return 0; } int writeData(int size, char *fileName, char *data) { ofstream file (fileName, ios::out|ios::binary|ios::trunc); if (file.is_open()) { file.write (data, size); file.close(); cout << "The entire file content was written to file." 
<< endl; return 0; } else cout << "Unable to open file"; return -1; } int EncryptSeq (int n, char* data_in, char* data_out, int key_length, int* key) { int i; timer sequentialTime = timer("Sequential encryption"); sequentialTime.start(); for (i=0; i<n; i++) { // shift the letter according to caesars cypher data_out[i] = mod(data_in[i] - ' ' + *key,95) + ' '; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int DecryptSeq (int n, char* data_in, char* data_out, int key_length, int *key) { int i; timer sequentialTime = timer("Sequential decryption"); sequentialTime.start(); for (i=0; i<n; i++) { // shift the letter according to caesars cypher data_out[i] = mod(data_in[i] - ' ' - *key,95) + ' '; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int EncryptCuda (int n, char* data_in, char* data_out, int key_length, int *key) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(hipFree(deviceDataIn)); cout << "could not allocate memory!" << endl; return -1; } char *deviceKey = NULL; checkCudaCall(hipMalloc((void **) &deviceKey, key_length * sizeof(int))); if (deviceKey == NULL) { checkCudaCall(hipFree(deviceDataIn)); checkCudaCall(hipFree(deviceDataOut)); cout << "could not allocate memory!" << endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice)); checkCudaCall(hipMemcpy(deviceKey, key, key_length*sizeof(int), hipMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); hipLaunchKernelGGL(( encryptKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceDataIn, deviceDataOut, key_length, deviceKey); hipDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(hipGetLastError()); // copy result back memoryTime.start(); checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(hipFree(deviceDataIn)); checkCudaCall(hipFree(deviceDataOut)); checkCudaCall(hipFree(deviceKey)); cout << fixed << setprecision(6); cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int DecryptCuda (int n, char* data_in, char* data_out, int key_length, int *key) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(hipFree(deviceDataIn)); cout << "could not allocate memory!" 
<< endl; return -1; } char *deviceKey = NULL; checkCudaCall(hipMalloc((void **) &deviceKey, key_length * sizeof(int))); if (deviceKey == NULL) { checkCudaCall(hipFree(deviceDataIn)); checkCudaCall(hipFree(deviceDataOut)); cout << "could not allocate memory!" << endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice)); checkCudaCall(hipMemcpy(deviceKey, key, key_length*sizeof(int), hipMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); hipLaunchKernelGGL(( decryptKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceDataIn, deviceDataOut, key_length, deviceKey); hipDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(hipGetLastError()); // copy result back memoryTime.start(); checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(hipFree(deviceDataIn)); checkCudaCall(hipFree(deviceDataOut)); checkCudaCall(hipFree(deviceKey)); cout << fixed << setprecision(6); cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int main(int argc, char* argv[]) { int *enc_key; int key_length = 0; if (argc == 4){ key_length = encryption_key_parse(argv, (int**)&enc_key); } int n; n = fileSize(); if (n == -1) { cout << "File not found! Exiting ... " << endl; exit(0); } char* data_in = new char[n]; char* data_out = new char[n]; readData("original.data", data_in); cout << "Encrypting a file of " << n << " characters." << endl; EncryptSeq(n, data_in, data_out, key_length, enc_key); writeData(n, "sequential.data", data_out); EncryptCuda(n, data_in, data_out, key_length, enc_key); writeData(n, "cuda.data", data_out); cout << "Decrypting a file of " << n << "characters" << endl; readData("sequential.data", data_in); DecryptSeq(n, data_in, data_out, key_length, enc_key); writeData(n, "sequential_decrypted.data", data_out); readData("cuda.data", data_in); DecryptCuda(n, data_in, data_out, key_length, enc_key); writeData(n, "recovered.data", data_out); delete[] data_in; delete[] data_out; return 0; }
68c014ca3d438896187ad35a8f76f4e3ecab5cd9.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" using namespace std; /* * Implements a REAL modulo function **/ char mod(char x, char N) { // Source: https://stackoverflow.com/questions/11720656/modulo-operation-with-negative-numbers return (x % N + N) %N; } /* * Implements a REAL modulo function but on the GPU **/ __device__ char cudaMod(char x, char N) { // Source: https://stackoverflow.com/questions/11720656/modulo-operation-with-negative-numbers return (x % N + N) %N; } /** * Given a pointer to an integer array and the argv array, compute * the encryption key list into the int array and return how many * keys are given. */ int encryption_key_parse(char **argv, int **int_array) { int i = 0; /* Some loops to check if the brackets/comma's are present */ while(1) { if(argv[3][i] == '\0') break; i++; } char list[i]; for(int j = 0; j <= i; j++) { list[j] = argv[3][j]; } if(list[0] != '[' || list[i - 1] != ']') { printf("Error: Forget the brackets ([ or ]) \n"); return 0; } char list2[i - 2]; char check; int num_keys = 1; for(int j = 0; j <= (i - 3); j++) { check = list[j+1]; list2[j] = check; if(check == ',') { num_keys++; } } /* Malloc the keys for use in the main function */ int *keys; keys = (int*) malloc(sizeof(int)*num_keys); char *pointer; int keycounter = 0; pointer = strtok(list2, ","); /* Go through the string */ while(pointer != NULL) { int num = atoi(pointer); keys[keycounter] = num; keycounter++; /* Skip next comma */ pointer = strtok(NULL, ","); } *int_array = keys; return num_keys; } /* Utility function, use to do error checking. Use this function like this: checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(cudaGetLastError()); */ static void checkCudaCall(cudaError_t result) { if (result != cudaSuccess) { cerr << "cuda error: " << cudaGetErrorString(result) << endl; exit(1); } } __global__ void encryptKernel(char* deviceDataIn, char* deviceDataOut, int key_length, char *deviceKey) { unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = cudaMod(deviceDataIn[index] - ' ' + deviceKey[0], 95) + ' '; } __global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut, int key_length, char *deviceKey) { unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = cudaMod(deviceDataIn[index] - ' ' - deviceKey[0], 95) + ' '; } int fileSize() { int size; ifstream file ("original.data", ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.close(); } else { cout << "Unable to open file"; size = -1; } return size; } int readData(char *fileName, char *data) { streampos size; ifstream file (fileName, ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.seekg (0, ios::beg); file.read (data, size); file.close(); cout << "The entire file content is in memory." << endl; } else cout << "Unable to open file" << endl; return 0; } int writeData(int size, char *fileName, char *data) { ofstream file (fileName, ios::out|ios::binary|ios::trunc); if (file.is_open()) { file.write (data, size); file.close(); cout << "The entire file content was written to file." 
<< endl; return 0; } else cout << "Unable to open file"; return -1; } int EncryptSeq (int n, char* data_in, char* data_out, int key_length, int* key) { int i; timer sequentialTime = timer("Sequential encryption"); sequentialTime.start(); for (i=0; i<n; i++) { // shift the letter according to caesars cypher data_out[i] = mod(data_in[i] - ' ' + *key,95) + ' '; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int DecryptSeq (int n, char* data_in, char* data_out, int key_length, int *key) { int i; timer sequentialTime = timer("Sequential decryption"); sequentialTime.start(); for (i=0; i<n; i++) { // shift the letter according to caesars cypher data_out[i] = mod(data_in[i] - ' ' - *key,95) + ' '; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int EncryptCuda (int n, char* data_in, char* data_out, int key_length, int *key) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(cudaFree(deviceDataIn)); cout << "could not allocate memory!" << endl; return -1; } char *deviceKey = NULL; checkCudaCall(cudaMalloc((void **) &deviceKey, key_length * sizeof(int))); if (deviceKey == NULL) { checkCudaCall(cudaFree(deviceDataIn)); checkCudaCall(cudaFree(deviceDataOut)); cout << "could not allocate memory!" << endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice)); checkCudaCall(cudaMemcpy(deviceKey, key, key_length*sizeof(int), cudaMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); encryptKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceDataIn, deviceDataOut, key_length, deviceKey); cudaDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(cudaGetLastError()); // copy result back memoryTime.start(); checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(cudaFree(deviceDataIn)); checkCudaCall(cudaFree(deviceDataOut)); checkCudaCall(cudaFree(deviceKey)); cout << fixed << setprecision(6); cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int DecryptCuda (int n, char* data_in, char* data_out, int key_length, int *key) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(cudaFree(deviceDataIn)); cout << "could not allocate memory!" 
<< endl; return -1; } char *deviceKey = NULL; checkCudaCall(cudaMalloc((void **) &deviceKey, key_length * sizeof(int))); if (deviceKey == NULL) { checkCudaCall(cudaFree(deviceDataIn)); checkCudaCall(cudaFree(deviceDataOut)); cout << "could not allocate memory!" << endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice)); checkCudaCall(cudaMemcpy(deviceKey, key, key_length*sizeof(int), cudaMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); decryptKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceDataIn, deviceDataOut, key_length, deviceKey); cudaDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(cudaGetLastError()); // copy result back memoryTime.start(); checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(cudaFree(deviceDataIn)); checkCudaCall(cudaFree(deviceDataOut)); checkCudaCall(cudaFree(deviceKey)); cout << fixed << setprecision(6); cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int main(int argc, char* argv[]) { int *enc_key; int key_length = 0; if (argc == 4){ key_length = encryption_key_parse(argv, (int**)&enc_key); } int n; n = fileSize(); if (n == -1) { cout << "File not found! Exiting ... " << endl; exit(0); } char* data_in = new char[n]; char* data_out = new char[n]; readData("original.data", data_in); cout << "Encrypting a file of " << n << " characters." << endl; EncryptSeq(n, data_in, data_out, key_length, enc_key); writeData(n, "sequential.data", data_out); EncryptCuda(n, data_in, data_out, key_length, enc_key); writeData(n, "cuda.data", data_out); cout << "Decrypting a file of " << n << "characters" << endl; readData("sequential.data", data_in); DecryptSeq(n, data_in, data_out, key_length, enc_key); writeData(n, "sequential_decrypted.data", data_out); readData("cuda.data", data_in); DecryptCuda(n, data_in, data_out, key_length, enc_key); writeData(n, "recovered.data", data_out); delete[] data_in; delete[] data_out; return 0; }
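The launches `encryptKernel<<<n/threadBlockSize, threadBlockSize>>>` drop the tail of the data whenever n is not a multiple of 512, and the kernels index without a bounds check; deviceKey is also declared as char* although the host key is an int array copied with key_length * sizeof(int). A bounds-checked sketch with the grid rounded up and the key kept as int:

__global__ void encryptKernelChecked(const char* dataIn, char* dataOut, int n, const int* key) {
    unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < (unsigned)n) {
        // Caesar shift with a true modulo so negative intermediate values wrap correctly.
        dataOut[index] = ((dataIn[index] - ' ' + key[0]) % 95 + 95) % 95 + ' ';
    }
}

// Host side: round the grid up so the last partial block is still launched.
// int gridSize = (n + threadBlockSize - 1) / threadBlockSize;
// encryptKernelChecked<<<gridSize, threadBlockSize>>>(deviceDataIn, deviceDataOut, n, deviceKey);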
4c1f7b486f721871546c46139c989ef50c568527.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include<iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> //Defining number of elements in Array #define N 5 //Kernel function for squaring number __global__ void gpuSquare(float *d_in, float *d_out) { //Getting thread index for current kernel int tid = threadIdx.x; // handle the data at this index float temp = d_in[tid]; d_out[tid] = temp*temp; } int main(void) { //Defining Arrays for host float h_in[N], h_out[N]; //Defining Pointers for device float *d_in, *d_out; // allocate the memory on the cpu hipMalloc((void**)&d_in, N * sizeof(float)); hipMalloc((void**)&d_out, N * sizeof(float)); //Initializing Array for (int i = 0; i < N; i++) { h_in[i] = i; } //Copy Array from host to device hipMemcpy(d_in, h_in, N * sizeof(float), hipMemcpyHostToDevice); //Calling square kernel with one block and N threads per block gpuSquare << <1, N >> >(d_in, d_out); //Coping result back to host from device memory hipMemcpy(h_out, d_out, N * sizeof(float), hipMemcpyDeviceToHost); //Printing result on console printf("Square of Number on GPU \n"); for (int i = 0; i < N; i++) { printf("The square of %f is %f\n", h_in[i], h_out[i]); } //Free up memory hipFree(d_in); hipFree(d_out); return 0; }
4c1f7b486f721871546c46139c989ef50c568527.cu
#include "stdio.h" #include<iostream> #include <cuda.h> #include <cuda_runtime.h> //Defining number of elements in Array #define N 5 //Kernel function for squaring number __global__ void gpuSquare(float *d_in, float *d_out) { //Getting thread index for current kernel int tid = threadIdx.x; // handle the data at this index float temp = d_in[tid]; d_out[tid] = temp*temp; } int main(void) { //Defining Arrays for host float h_in[N], h_out[N]; //Defining Pointers for device float *d_in, *d_out; // allocate the memory on the cpu cudaMalloc((void**)&d_in, N * sizeof(float)); cudaMalloc((void**)&d_out, N * sizeof(float)); //Initializing Array for (int i = 0; i < N; i++) { h_in[i] = i; } //Copy Array from host to device cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice); //Calling square kernel with one block and N threads per block gpuSquare << <1, N >> >(d_in, d_out); //Coping result back to host from device memory cudaMemcpy(h_out, d_out, N * sizeof(float), cudaMemcpyDeviceToHost); //Printing result on console printf("Square of Number on GPU \n"); for (int i = 0; i < N; i++) { printf("The square of %f is %f\n", h_in[i], h_out[i]); } //Free up memory cudaFree(d_in); cudaFree(d_out); return 0; }
8cc25d3df6dd749c41ac9f7eed6d2296d52d3649.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <hip/hip_runtime_api.h> //#define N 1573700//1310720//262144//131072//262144//83886080 //Quantidade de threads por blocos #define BLOCK_SIZE 32//1//1024//95536 #define nThreadsPerBlock 128//420//128//420 ou 416 #define NFinal (nThreadsPerBlock * 5) // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } __device__ int* memoria(int *vetDados, int ElemPorBlocos, int qtdProces){ __shared__ int vetComp[4096]; int auxGrupoDe32 = (qtdProces * 32); int comecoBloco = blockIdx.x * ElemPorBlocos; // onde cada bloco ir comeca int qtdElemThread = ElemPorBlocos / blockDim.x; int idCompartilhada = threadIdx.x; int idGlobal = comecoBloco + ((threadIdx.x / 32) * qtdElemThread) + (threadIdx.x - ((threadIdx.x / 32) * 32)) + auxGrupoDe32; int i; for(i = 0; i < 4096; i += blockDim.x){ vetComp[idCompartilhada] = vetDados[idGlobal]; idCompartilhada += blockDim.x; idGlobal += (qtdElemThread * 4); } return vetComp; } __global__ void subSeqMax(int *vet, int *vetFinal, int ElemPorThread, int n){ __shared__ int *p; // ponteiro para apontar para o vetor compartilhado // M t_m S suf int ini_M, fim_M, t_M, ini_S, fim_S, suf; //Variaveis do algoritmo t_M = suf = 0; int comecoThread = (threadIdx.x * 32); int j; for(j = 0; j < (n / 4096); j++){ // Quantas vezes terei que processa at chegar no n/blocos sendo que o vet compartilhado de 4096 p = memoria(vet,n,j); __syncthreads(); if(threadIdx.x < 128){ ini_M = fim_M = ini_S = fim_S = comecoThread -1; int i; for(i = comecoThread -1; i < comecoThread + 32; i++){ if(i == fim_M){ fim_S++; suf += p[i+1]; if(suf < 0){ suf = 0; fim_S = -1; } ini_S = fim_S == 0 ? 0 : ini_S; // Inicio S if(p[i+1] > 0){ fim_M++; t_M += p[i+1]; ini_M = fim_M == 0 ? 
0 : ini_M; // Inicio M } } else{ if(suf + p[i+1] > t_M){ fim_S++; if(ini_M == -1){ fim_S = ini_S = i +1; } suf += p[i+1]; ini_M = ini_S; fim_M = fim_S; t_M = suf; } else{ if(suf + p[i+1] > 0){ fim_S++; if(suf == 0){ ini_S = fim_S = i+1; } suf += p[i+1]; } else{ ini_S = fim_S = i + 2; suf = 0; } }//else }//else }// 1* for }// If 128 }// 2* for if(threadIdx.x < 128){ int idThread = blockIdx.x * blockDim.x + threadIdx.x; vetFinal[(idThread * 5)] = vetFinal[(idThread * 5)+1] = vetFinal[(idThread * 5)+2] = vetFinal[(idThread * 5)+3] = vetFinal[(idThread * 5)+4] = -1; //Colocando o M vetFinal[(idThread * 5)+2] = t_M; //Calculando o Prefixo int pref_Max, soma_Pref; soma_Pref = 0; pref_Max = 0; int i; if(ini_M > comecoThread -1){ for(i = 0; i < ini_M; i++){ soma_Pref += p[i]; if(soma_Pref > pref_Max){ pref_Max = soma_Pref; } } if(pref_Max == 0){ vetFinal[(idThread * 5)] = 0; vetFinal[(idThread * 5)+1] = soma_Pref; } else{ vetFinal[(idThread * 5)] = pref_Max; //Prefixo vetFinal[(idThread * 5)+1] = soma_Pref - pref_Max; //Numeros negativos } } //Calculo do sufixo int suf_Max, soma_Suf; soma_Suf = suf_Max = 0; if(fim_M < comecoThread + 32){ for(i = (comecoThread + 32)-1; i > fim_M; i--){ soma_Suf += p[i]; if(soma_Suf > suf_Max){ suf_Max = soma_Suf; } } if(suf_Max == 0){ vetFinal[(idThread * 5)+3] = 0; //Sufixo vazio vetFinal[(idThread * 5)+4] = suf_Max;//Os Numeros negativos } else{ vetFinal[(idThread * 5)+3] = suf_Max; //Sufixo vazio vetFinal[(idThread * 5)+4] = soma_Suf - suf_Max;//Os Numeros negativos } } }//if 128 } void subSeqMaxFinal(int *vet, int n){ // M t_m S suf int ini_M, fim_M, t_M, ini_S, fim_S, suf; ini_M = fim_M = ini_S = fim_S = -1; t_M = suf = 0; int i; for(i = -1; i < n-1; i++){ if(i == fim_M){ fim_S++; suf += vet[i+1]; if(suf < 0){ suf = 0; fim_S = -1; } ini_S = fim_S == 0 ? 0 : ini_S; // Inicio S if(vet[i+1] > 0){ fim_M++; t_M += vet[i+1]; ini_M = fim_M == 0 ? 
0 : ini_M; // Inicio M } } else{ if(suf + vet[i+1] > t_M){ fim_S++; if(ini_M == -1){ fim_S = ini_S = i +1; } suf += vet[i+1]; ini_M = ini_S; fim_M = fim_S; t_M = suf; } else{ if(suf + vet[i+1] > 0){ fim_S++; if(suf == 0){ ini_S = fim_S = i+1; } } else{ ini_S = fim_S = i + 2; suf = 0; } } } } printf("Assertion started\n"); assert (t_M == 964); printf("Assertion Finished"); printf(" \n\n A sub Sequencia deu %d \n\n", t_M); } int main(int argc, char** argv){ float elapsedTime; // Tempo hipEvent_t start, stop; // Tempo //Vetor aux int *vet_d; int *vetFinal_d; if (argc != 3) { fprintf(stderr, "Syntax: %s <Vector size Width> <CacheConfL1> \n", argv[0]); return EXIT_FAILURE; } //Vet int N = atoi(argv[1]); int *vet_h = (int *) malloc(sizeof(int) * N); // Vetor Dados int *vetFinal_h = (int *) malloc(sizeof(int) * NFinal);// Vetor Final int i; for(i = 0; i < N; i++){ // Preenchimento dos dados vet_h[i] = -1; } for(i = 0; i < NFinal; i++){ // Preenchimento dos dados vetFinal_h[i] = -1; } vet_h[131] = 954; vet_h[132] = 10; int devId = 0; int CacheConfL1 = atoi(argv[2]); checkCuda( hipSetDevice(devId) ); hipDeviceReset(); hipDeviceProp_t prop; checkCuda( hipGetDeviceProperties(&prop, devId) ); printf("Device: %s\n", prop.name); //Reservando o espao na memoria no device hipMalloc((void**)&vet_d, N * sizeof(int)); //Vetor de dados hipMalloc((void**)&vetFinal_d, NFinal * sizeof(int));// Vetor Final //Copiando o vetor de dados para o device hipMemcpy(vet_d, vet_h, N * sizeof(int), hipMemcpyHostToDevice); int ElemPorBlocos = (N / BLOCK_SIZE); int ElemPorThread = (ElemPorBlocos / nThreadsPerBlock); if (CacheConfL1 == 1){ hipFuncSetCacheConfig(subSeqMax, hipFuncCachePreferShared); } else if (CacheConfL1 == 2){ hipFuncSetCacheConfig(subSeqMax, hipFuncCachePreferEqual); } else if (CacheConfL1 == 3){ hipFuncSetCacheConfig(subSeqMax, hipFuncCachePreferL1); } else { hipFuncSetCacheConfig(subSeqMax, hipFuncCachePreferNone); } hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); hipLaunchKernelGGL(( subSeqMax), dim3(BLOCK_SIZE), dim3(nThreadsPerBlock), 0, 0, vet_d, vetFinal_d, ElemPorThread,N / BLOCK_SIZE); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("Primeiro kernel (ms) = %f\n\n", elapsedTime); hipMemcpy(vetFinal_h, vetFinal_d, NFinal * sizeof(int), hipMemcpyDeviceToHost); //Resposta Final /*for(i = 0; i < 4096; i++){ if(vetFinal_h[i] != 0 && vetFinal_h[i] != -1 ) printf("%d ", vetFinal_h[i]); }*/ printf("\n\n"); hipFree(vetFinal_d); hipFree(vet_d); subSeqMaxFinal(vetFinal_h, NFinal); return 0; }
8cc25d3df6dd749c41ac9f7eed6d2296d52d3649.cu
#include <stdio.h> #include <assert.h> #include <stdlib.h> #include <cuda_profiler_api.h> //#define N 1573700//1310720//262144//131072//262144//83886080 //Quantidade de threads por blocos #define BLOCK_SIZE 32//1//1024//95536 #define nThreadsPerBlock 128//420//128//420 ou 416 #define NFinal (nThreadsPerBlock * 5) // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } __device__ int* memoria(int *vetDados, int ElemPorBlocos, int qtdProces){ __shared__ int vetComp[4096]; int auxGrupoDe32 = (qtdProces * 32); int comecoBloco = blockIdx.x * ElemPorBlocos; // onde cada bloco irá comeca int qtdElemThread = ElemPorBlocos / blockDim.x; int idCompartilhada = threadIdx.x; int idGlobal = comecoBloco + ((threadIdx.x / 32) * qtdElemThread) + (threadIdx.x - ((threadIdx.x / 32) * 32)) + auxGrupoDe32; int i; for(i = 0; i < 4096; i += blockDim.x){ vetComp[idCompartilhada] = vetDados[idGlobal]; idCompartilhada += blockDim.x; idGlobal += (qtdElemThread * 4); } return vetComp; } __global__ void subSeqMax(int *vet, int *vetFinal, int ElemPorThread, int n){ __shared__ int *p; // ponteiro para apontar para o vetor compartilhado // M t_m S suf int ini_M, fim_M, t_M, ini_S, fim_S, suf; //Variaveis do algoritmo t_M = suf = 0; int comecoThread = (threadIdx.x * 32); int j; for(j = 0; j < (n / 4096); j++){ // Quantas vezes terei que processa até chegar no n/blocos sendo que o vet compartilhado é de 4096 p = memoria(vet,n,j); __syncthreads(); if(threadIdx.x < 128){ ini_M = fim_M = ini_S = fim_S = comecoThread -1; int i; for(i = comecoThread -1; i < comecoThread + 32; i++){ if(i == fim_M){ fim_S++; suf += p[i+1]; if(suf < 0){ suf = 0; fim_S = -1; } ini_S = fim_S == 0 ? 0 : ini_S; // Inicio S if(p[i+1] > 0){ fim_M++; t_M += p[i+1]; ini_M = fim_M == 0 ? 
0 : ini_M; // Inicio M } } else{ if(suf + p[i+1] > t_M){ fim_S++; if(ini_M == -1){ fim_S = ini_S = i +1; } suf += p[i+1]; ini_M = ini_S; fim_M = fim_S; t_M = suf; } else{ if(suf + p[i+1] > 0){ fim_S++; if(suf == 0){ ini_S = fim_S = i+1; } suf += p[i+1]; } else{ ini_S = fim_S = i + 2; suf = 0; } }//else }//else }// 1* for }// If 128 }// 2* for if(threadIdx.x < 128){ int idThread = blockIdx.x * blockDim.x + threadIdx.x; vetFinal[(idThread * 5)] = vetFinal[(idThread * 5)+1] = vetFinal[(idThread * 5)+2] = vetFinal[(idThread * 5)+3] = vetFinal[(idThread * 5)+4] = -1; //Colocando o M vetFinal[(idThread * 5)+2] = t_M; //Calculando o Prefixo int pref_Max, soma_Pref; soma_Pref = 0; pref_Max = 0; int i; if(ini_M > comecoThread -1){ for(i = 0; i < ini_M; i++){ soma_Pref += p[i]; if(soma_Pref > pref_Max){ pref_Max = soma_Pref; } } if(pref_Max == 0){ vetFinal[(idThread * 5)] = 0; vetFinal[(idThread * 5)+1] = soma_Pref; } else{ vetFinal[(idThread * 5)] = pref_Max; //Prefixo vetFinal[(idThread * 5)+1] = soma_Pref - pref_Max; //Numeros negativos } } //Calculo do sufixo int suf_Max, soma_Suf; soma_Suf = suf_Max = 0; if(fim_M < comecoThread + 32){ for(i = (comecoThread + 32)-1; i > fim_M; i--){ soma_Suf += p[i]; if(soma_Suf > suf_Max){ suf_Max = soma_Suf; } } if(suf_Max == 0){ vetFinal[(idThread * 5)+3] = 0; //Sufixo vazio vetFinal[(idThread * 5)+4] = suf_Max;//Os Numeros negativos } else{ vetFinal[(idThread * 5)+3] = suf_Max; //Sufixo vazio vetFinal[(idThread * 5)+4] = soma_Suf - suf_Max;//Os Numeros negativos } } }//if 128 } void subSeqMaxFinal(int *vet, int n){ // M t_m S suf int ini_M, fim_M, t_M, ini_S, fim_S, suf; ini_M = fim_M = ini_S = fim_S = -1; t_M = suf = 0; int i; for(i = -1; i < n-1; i++){ if(i == fim_M){ fim_S++; suf += vet[i+1]; if(suf < 0){ suf = 0; fim_S = -1; } ini_S = fim_S == 0 ? 0 : ini_S; // Inicio S if(vet[i+1] > 0){ fim_M++; t_M += vet[i+1]; ini_M = fim_M == 0 ? 
0 : ini_M; // Inicio M } } else{ if(suf + vet[i+1] > t_M){ fim_S++; if(ini_M == -1){ fim_S = ini_S = i +1; } suf += vet[i+1]; ini_M = ini_S; fim_M = fim_S; t_M = suf; } else{ if(suf + vet[i+1] > 0){ fim_S++; if(suf == 0){ ini_S = fim_S = i+1; } } else{ ini_S = fim_S = i + 2; suf = 0; } } } } printf("Assertion started\n"); assert (t_M == 964); printf("Assertion Finished"); printf(" \n\n A sub Sequencia deu %d \n\n", t_M); } int main(int argc, char** argv){ float elapsedTime; // Tempo cudaEvent_t start, stop; // Tempo //Vetor aux int *vet_d; int *vetFinal_d; if (argc != 3) { fprintf(stderr, "Syntax: %s <Vector size Width> <CacheConfL1> \n", argv[0]); return EXIT_FAILURE; } //Vet int N = atoi(argv[1]); int *vet_h = (int *) malloc(sizeof(int) * N); // Vetor Dados int *vetFinal_h = (int *) malloc(sizeof(int) * NFinal);// Vetor Final int i; for(i = 0; i < N; i++){ // Preenchimento dos dados vet_h[i] = -1; } for(i = 0; i < NFinal; i++){ // Preenchimento dos dados vetFinal_h[i] = -1; } vet_h[131] = 954; vet_h[132] = 10; int devId = 0; int CacheConfL1 = atoi(argv[2]); checkCuda( cudaSetDevice(devId) ); cudaDeviceReset(); cudaDeviceProp prop; checkCuda( cudaGetDeviceProperties(&prop, devId) ); printf("Device: %s\n", prop.name); //Reservando o espaço na memoria no device cudaMalloc((void**)&vet_d, N * sizeof(int)); //Vetor de dados cudaMalloc((void**)&vetFinal_d, NFinal * sizeof(int));// Vetor Final //Copiando o vetor de dados para o device cudaMemcpy(vet_d, vet_h, N * sizeof(int), cudaMemcpyHostToDevice); int ElemPorBlocos = (N / BLOCK_SIZE); int ElemPorThread = (ElemPorBlocos / nThreadsPerBlock); if (CacheConfL1 == 1){ cudaFuncSetCacheConfig(subSeqMax, cudaFuncCachePreferShared); } else if (CacheConfL1 == 2){ cudaFuncSetCacheConfig(subSeqMax, cudaFuncCachePreferEqual); } else if (CacheConfL1 == 3){ cudaFuncSetCacheConfig(subSeqMax, cudaFuncCachePreferL1); } else { cudaFuncSetCacheConfig(subSeqMax, cudaFuncCachePreferNone); } cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); subSeqMax<<<BLOCK_SIZE, nThreadsPerBlock>>>(vet_d, vetFinal_d, ElemPorThread,N / BLOCK_SIZE); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("Primeiro kernel (ms) = %f\n\n", elapsedTime); cudaMemcpy(vetFinal_h, vetFinal_d, NFinal * sizeof(int), cudaMemcpyDeviceToHost); //Resposta Final /*for(i = 0; i < 4096; i++){ if(vetFinal_h[i] != 0 && vetFinal_h[i] != -1 ) printf("%d ", vetFinal_h[i]); }*/ printf("\n\n"); cudaFree(vetFinal_d); cudaFree(vet_d); subSeqMaxFinal(vetFinal_h, NFinal); return 0; }
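The kernel and subSeqMaxFinal implement a maximum-subarray (Kadane-style) computation with prefix/suffix bookkeeping; note that in the CPU verifier the branch `if(suf + vet[i+1] > 0)` appears to omit the `suf += vet[i+1];` update that the GPU version performs. For checking the final answer independently, a plain Kadane pass is enough (a hypothetical helper, not part of the file):

// Maximum-subarray value, allowing the empty subsequence (matches t_M starting at 0).
long long kadaneMax(const int* v, int n) {
    long long best = 0;
    long long cur = 0;
    for (int i = 0; i < n; ++i) {
        cur += v[i];
        if (cur < 0) cur = 0;      // drop any prefix with negative sum
        if (cur > best) best = cur;
    }
    return best;
}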
027037b07ad70c63e7d9d84bbcc1125038d44578.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void add( int *a, int *b, int *c, int vector_size ) { // Calculate the index in the vector for the thread using the internal variables int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE // This if statement is added in case we have more threads executing // Than number of elements in the vectors. How can this help? if (tid < vector_size){ c[tid] = a[tid] + b[tid]; // HERE } } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments if (argc != 3) { // Tell the user how to run the program printf ("Usage: %s vector_size block_size\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return 1; } // Set GPU Variables based on input arguments int vector_size = atoi(argv[1]); int block_size = atoi(argv[2]); int grid_size = ((vector_size-1)/block_size) + 1; // Set device that we will use for our cuda code hipSetDevice(0); // Time Variables hipEvent_t start, stop; float time; hipEventCreate (&start); hipEventCreate (&stop); // Input Arrays and variables int *a = new int [vector_size]; int *b = new int [vector_size]; int *c_cpu = new int [vector_size]; int *c_gpu = new int [vector_size]; // Pointers in GPU memory int *dev_a; int *dev_b; int *dev_c; // fill the arrays 'a' and 'b' on the CPU printf("Initializing input arrays.\n"); for (int i = 0; i < vector_size; i++) { a[i] = rand()%10; b[i] = rand()%10; } // // CPU Calculation ////////////////// printf("Running sequential job.\n"); hipEventRecord(start,0); // Calculate C in the CPU for (int i = 0; i < vector_size; i++) { c_cpu[i] = a[i] + b[i]; } hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("\tSequential Job Time: %.2f ms\n", time); // allocate the memory on the GPU hipMalloc(&dev_a, vector_size*sizeof(int)); // HERE hipMalloc(&dev_b, vector_size*sizeof(int)); hipMalloc(&dev_c, vector_size*sizeof(int)); // copy the arrays 'a' and 'b' to the GPU hipMemcpy(dev_a, a, vector_size*sizeof(int), hipMemcpyHostToDevice);// HERE hipMemcpy(dev_b, b, vector_size*sizeof(int), hipMemcpyHostToDevice); // // GPU Calculation //////////////////////// printf("Running parallel job.\n"); hipEventRecord(start,0); // call the kernel hipLaunchKernelGGL(( add), dim3(grid_size), dim3(block_size), 0, 0, dev_a, dev_b, dev_c, vector_size); // HERE hipDeviceSynchronize(); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("\tParallel Job Time: %.2f ms\n", time); // copy the array 'c' back from the GPU to the CPU hipMemcpy(c_gpu, dev_c, vector_size*sizeof(int), hipMemcpyDeviceToHost); // HERE (there's one more at the end, don't miss it!) // compare the results int error = 0; for (int i = 0; i < vector_size; i++) { if (c_cpu[i] != c_gpu[i]){ error = 1; printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // free CPU data free (a); free (b); free (c_cpu); free (c_gpu); // free the memory allocated on the GPU // HERE hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); return 0; }
027037b07ad70c63e7d9d84bbcc1125038d44578.cu
#include <stdio.h> #include <stdlib.h> __global__ void add( int *a, int *b, int *c, int vector_size ) { // Calculate the index in the vector for the thread using the internal variables int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE // This if statement is added in case we have more threads executing // Than number of elements in the vectors. How can this help? if (tid < vector_size){ c[tid] = a[tid] + b[tid]; // HERE } } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments if (argc != 3) { // Tell the user how to run the program printf ("Usage: %s vector_size block_size\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return 1; } // Set GPU Variables based on input arguments int vector_size = atoi(argv[1]); int block_size = atoi(argv[2]); int grid_size = ((vector_size-1)/block_size) + 1; // Set device that we will use for our cuda code cudaSetDevice(0); // Time Variables cudaEvent_t start, stop; float time; cudaEventCreate (&start); cudaEventCreate (&stop); // Input Arrays and variables int *a = new int [vector_size]; int *b = new int [vector_size]; int *c_cpu = new int [vector_size]; int *c_gpu = new int [vector_size]; // Pointers in GPU memory int *dev_a; int *dev_b; int *dev_c; // fill the arrays 'a' and 'b' on the CPU printf("Initializing input arrays.\n"); for (int i = 0; i < vector_size; i++) { a[i] = rand()%10; b[i] = rand()%10; } // // CPU Calculation ////////////////// printf("Running sequential job.\n"); cudaEventRecord(start,0); // Calculate C in the CPU for (int i = 0; i < vector_size; i++) { c_cpu[i] = a[i] + b[i]; } cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("\tSequential Job Time: %.2f ms\n", time); // allocate the memory on the GPU cudaMalloc(&dev_a, vector_size*sizeof(int)); // HERE cudaMalloc(&dev_b, vector_size*sizeof(int)); cudaMalloc(&dev_c, vector_size*sizeof(int)); // copy the arrays 'a' and 'b' to the GPU cudaMemcpy(dev_a, a, vector_size*sizeof(int), cudaMemcpyHostToDevice);// HERE cudaMemcpy(dev_b, b, vector_size*sizeof(int), cudaMemcpyHostToDevice); // // GPU Calculation //////////////////////// printf("Running parallel job.\n"); cudaEventRecord(start,0); // call the kernel add<<<grid_size, block_size>>>(dev_a, dev_b, dev_c, vector_size); // HERE cudaDeviceSynchronize(); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("\tParallel Job Time: %.2f ms\n", time); // copy the array 'c' back from the GPU to the CPU cudaMemcpy(c_gpu, dev_c, vector_size*sizeof(int), cudaMemcpyDeviceToHost); // HERE (there's one more at the end, don't miss it!) // compare the results int error = 0; for (int i = 0; i < vector_size; i++) { if (c_cpu[i] != c_gpu[i]){ error = 1; printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // free CPU data free (a); free (b); free (c_cpu); free (c_gpu); // free the memory allocated on the GPU // HERE cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
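Both versions of the file above size the grid with ((vector_size-1)/block_size) + 1, i.e. a ceiling division, so that grid_size * block_size >= vector_size even when vector_size is not a multiple of block_size; the if (tid < vector_size) guard in the kernel then discards the surplus threads of the last block. A minimal sketch of that launch math, with an illustrative helper name not taken from the file:

// Ceiling division for launch configuration (sketch).
static inline int ceil_div(int a, int b) { return (a + b - 1) / b; }

// Example: vector_size = 1000, block_size = 256
//   ceil_div(1000, 256) == 4 blocks  -> 1024 threads launched in total
//   threads with tid in [1000, 1023] fail the bounds check and do nothing.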
3622260fa0152396ebc3a84e369b73ee82ab95ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <math.h> #include <iostream> #include <chrono> // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; // 1048576 elements float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N * sizeof(float)); hipMallocManaged(&y, N * sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU hipLaunchKernelGGL(( add) , dim3(1), dim3(1) , 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
3622260fa0152396ebc3a84e369b73ee82ab95ac.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <math.h> #include <iostream> #include <chrono> // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; // 1048576 elements float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N * sizeof(float)); cudaMallocManaged(&y, N * sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU add <<< 1, 1 >>> (N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
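The add kernel above is launched with <<<1, 1>>>, so a single thread walks the whole array serially; the point of the example is the unified-memory allocation, not parallelism. A common follow-up, sketched below under the assumption that the rest of main() stays unchanged, is a grid-stride loop so the same kernel remains correct for any launch configuration:

// Grid-stride variant (sketch): each thread starts at its global index and
// advances by the total number of launched threads, covering all n elements.
__global__ void add_grid_stride(int n, float *x, float *y)
{
    int index  = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}
// Possible launch: add_grid_stride<<<(N + 255) / 256, 256>>>(N, x, y);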
30d5feb08e4c7d09c5de0d0ff11b4c81f6131223.hip
// !!! This is a file automatically generated by hipify!!! /**** * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ****/ #include <string.h> #include <stdio.h> #include <assert.h> #include <limits.h> #include <sys/types.h> #include <unistd.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <mpi.h> #include <gdsync.h> #include <mp.h> #include "prof.h" #define CUDA_CHECK(stmt) \ do { \ hipError_t result = (stmt); \ if (hipSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", \ __FILE__, __LINE__,hipGetErrorString(result));\ exit(-1); \ } \ assert(hipSuccess == result); \ } while (0) #define CU_CHECK(stmt) \ do { \ hipError_t result = (stmt); \ if (hipSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %d \n", \ __FILE__, __LINE__, result);\ exit(-1); \ } \ assert(hipSuccess == result); \ } while (0) #define MP_CHECK(stmt) \ do { \ int result = (stmt); \ if (0 != result) { \ fprintf(stderr, "[%s:%d] mp call failed \n", \ __FILE__, __LINE__); \ exit(-1); \ } \ assert(0 == result); \ } while (0) int enable_debug_prints = 0; #define mp_dbg_msg(FMT, ARGS...) 
do \ { \ if (enable_debug_prints) { \ fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \ fflush(stderr); \ } \ } while(0) #define MAX_SIZE 1*1024*1024 #define ITER_COUNT_SMALL 1000 #define ITER_COUNT_LARGE 1000 struct prof prof_normal; struct prof prof_async; int prof_start = 0; int prof_idx = 0; static const int over_sub_factor = 2; int gpu_num_sm; int enable_ud = 0; int gpu_id = -1; int comm_size, my_rank, peer; int steps_per_batch = 20, batches_inflight = 4; int enable_async = 1; int calc_size = 128*1024; int use_calc_size = 1; volatile uint32_t tracking_event = 0; __device__ int counter; __device__ int clockrate; __global__ void calc_kernel(int n, float c, float *in, float *out) { const uint tid = threadIdx.x; const uint bid = blockIdx.x; const uint block_size = blockDim.x; const uint grid_size = gridDim.x; const uint gid = tid + bid*block_size; const uint n_threads = block_size*grid_size; for (int i=gid; i<n; i += n_threads) out[i] = in[i] * c; } int gpu_launch_calc_kernel(size_t size, hipStream_t stream) { const int nblocks = over_sub_factor * gpu_num_sm; const int nthreads = 32*2; int n = size / sizeof(float); static float *in = NULL; static float *out = NULL; if (!in) { CUDA_CHECK(hipMalloc((void **)&in, size)); CUDA_CHECK(hipMalloc((void **)&out, size)); CUDA_CHECK(hipMemset((void *)in, 1, size)); CUDA_CHECK(hipMemset((void *)out, 1, size)); } hipLaunchKernelGGL(( calc_kernel), dim3(nblocks), dim3(nthreads), 0, stream, n, 1.0f, in, out); CUDA_CHECK(hipGetLastError()); return 0; } __global__ void dummy_kernel(double time) { long long int start, stop; double usec; start = clock64(); do { stop = clock64(); usec = ((double)(stop-start)*1000)/((double)clockrate); counter = usec; } while(usec < time); } /*application and pack buffers*/ void *buf = NULL, *sbuf_d = NULL, *rbuf_d = NULL; hipStream_t stream; size_t buf_size; /*mp specific objects*/ mp_request_t *sreq = NULL; mp_request_t *rreq = NULL; mp_reg_t sreg, rreg; double time_start, time_stop; int batch_to_rreq_idx (int batch_idx) { return (batch_idx % (batches_inflight + 1))*steps_per_batch; } int batch_to_sreq_idx (int batch_idx) { return (batch_idx % batches_inflight)*steps_per_batch; } void post_recv (int size, int batch_index) { int j; int req_idx = batch_to_rreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d), size, peer, &rreg, &rreq[req_idx + j])); } } void wait_send (int batch_index) { int j; int req_idx = batch_to_sreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { MP_CHECK(mp_wait(&sreq[req_idx + j])); } } void wait_recv (int batch_index) { int j; int req_idx = batch_to_rreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { MP_CHECK(mp_wait(&rreq[req_idx + j])); } } void post_work_async (int size, int batch_index, double kernel_size) { int j; int sreq_idx = batch_to_sreq_idx (batch_index); int rreq_idx = batch_to_rreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { if (!my_rank) { MP_CHECK(mp_wait_on_stream(&rreq[rreq_idx + j], stream)); if (kernel_size > 0) { if (use_calc_size > 0) gpu_launch_calc_kernel(kernel_size, stream); else hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_size); } MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d), size, peer, &sreg, &sreq[sreq_idx + j], stream)); } else { MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d), size, peer, &sreg, &sreq[sreq_idx + j], stream)); MP_CHECK(mp_wait_on_stream(&rreq[rreq_idx + j], stream)); if (kernel_size > 
0) { if (use_calc_size > 0) gpu_launch_calc_kernel(kernel_size, stream); else hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_size); } } } } void post_work_sync (int size, int batch_index, double kernel_size) { int j; int rreq_idx = batch_to_rreq_idx (batch_index); int sreq_idx = batch_to_sreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { if (!my_rank) { MP_CHECK(mp_wait(&rreq[rreq_idx + j])); if (kernel_size > 0) { if (use_calc_size > 0) gpu_launch_calc_kernel(kernel_size, stream); else hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_size); CUDA_CHECK(hipStreamSynchronize(stream)); } MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d), size, peer, &sreg, &sreq[sreq_idx + j])); } else { MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d), size, peer, &sreg, &sreq[sreq_idx + j])); MP_CHECK(mp_wait(&rreq[rreq_idx + j])); if (kernel_size > 0) { if (use_calc_size > 0) gpu_launch_calc_kernel(kernel_size, stream); else hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_size); CUDA_CHECK(hipStreamSynchronize(stream)); } } } } double prepost_latency; double sr_exchange (MPI_Comm comm, int size, int iter_count, double kernel_size, int use_async) { int j; double latency; double time_start, time_stop; int batch_count, wait_send_batch = 0, wait_recv_batch = 0; struct prof *prof = NULL; prof = (use_async) ? &prof_async : &prof_normal; assert((iter_count%steps_per_batch) == 0); batch_count = iter_count/steps_per_batch; tracking_event = 0; post_recv (size, 0); MPI_Barrier(MPI_COMM_WORLD); time_start = MPI_Wtime(); for (j=0; (j<batches_inflight) && (j<batch_count); j++) { if (j<(batch_count-1)) { post_recv (size, j+1); } if (use_async) { post_work_async (size, j, kernel_size); } else { post_work_sync (size, j, kernel_size); } } time_stop = MPI_Wtime(); prepost_latency = ((time_stop - time_start)*1e6); time_start = MPI_Wtime(); wait_send_batch = wait_recv_batch = 0; prof_idx = 0; while (wait_send_batch < batch_count) { if (!my_rank && prof_start) PROF(prof, prof_idx++); if (use_async) { wait_recv (wait_recv_batch); wait_recv_batch++; } if (!my_rank && prof_start) PROF(prof, prof_idx++); wait_send (wait_send_batch); wait_send_batch++; if (!my_rank && prof_start) PROF(prof, prof_idx++); if (j < (batch_count-1)) { post_recv (size, j+1); } if (!my_rank && prof_start) PROF(prof, prof_idx++); if (j < batch_count) { if (use_async) { post_work_async (size, j, kernel_size); } else { post_work_sync (size, j, kernel_size); } } if (!my_rank && prof_start) { PROF(prof, prof_idx++); prof_update(prof); prof_idx = 0; } j++; } MPI_Barrier(comm); time_stop = MPI_Wtime(); latency = (((time_stop - time_start)*1e6 + prepost_latency)/(iter_count)); CUDA_CHECK(hipDeviceSynchronize()); return latency; } int main (int argc, char *argv[]) { int iter_count, max_size, size, dev_count, local_rank, dev_id = 0; int kernel_size = 20; int comm_comp_ratio = 0; int validate = 0; size = 1; max_size = MAX_SIZE; char *value = getenv("ENABLE_VALIDATION"); if (value != NULL) { validate = atoi(value); } value = getenv("ENABLE_DEBUG_MSG"); if (value != NULL) { enable_debug_prints = atoi(value); } value = getenv("KERNEL_TIME"); if (value != NULL) { kernel_size = atoi(value); } value = getenv("COMM_COMP_RATIO"); if (value != NULL) { comm_comp_ratio = atoi(value); } value = getenv("CALC_SIZE"); if (value != NULL) { calc_size = atoi(value); } use_calc_size = 1; value = getenv("USE_CALC_SIZE"); if (value != NULL) { use_calc_size = atoi(value); } value = getenv("STEPS_PER_BATCH"); if 
(value != NULL) { steps_per_batch = atoi(value); } value = getenv("BATCHES_INFLIGHT"); if (value != NULL) { batches_inflight = atoi(value); } value = getenv("SIZE"); if (value != NULL) { size = atoi(value); } value = getenv("MP_ENABLE_UD"); if (value != NULL) { enable_ud = atoi(value); } if (enable_ud) { if (max_size > 4096) { max_size = 4096; } } while(1) { int c; c = getopt(argc, argv, "d:h"); if (c == -1) break; switch(c) { case 'd': gpu_id = strtol(optarg, NULL, 0); break; case 'h': printf("syntax: %s [-d <gpu_id]\n", argv[0]); break; default: printf("ERROR: invalid option\n"); exit(EXIT_FAILURE); } } char *tags = "wait_recv|wait_send|post_recv|post_work"; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &comm_size); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (comm_size != 2) { fprintf(stderr, "this test requires exactly two processes \n"); exit(-1); } CUDA_CHECK(hipGetDeviceCount(&dev_count)); if (dev_count <= 0) { fprintf(stderr, "no CUDA devices found \n"); exit(-1); } if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK")); } else if (getenv("OMPI_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("OMPI_COMM_WORLD_LOCAL_RANK")); } else { local_rank = 0; } if (gpu_id >= 0) { dev_id = gpu_id; } else if (getenv("USE_GPU")) { dev_id = atoi(getenv("USE_GPU")); } else { dev_id = local_rank%dev_count; } if (dev_id >= dev_count) { fprintf(stderr, "invalid dev_id=%d\n", dev_id); exit(-1); } fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d\n", my_rank, local_rank, dev_count, dev_id); CUDA_CHECK(hipSetDevice(dev_id)); CUDA_CHECK(hipFree(0)); hipDeviceProp_t prop; CUDA_CHECK(hipGetDeviceProperties(&prop, dev_id)); CUDA_CHECK(hipMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, hipMemcpyHostToDevice)); gpu_num_sm = prop.multiProcessorCount; fprintf(stdout, "[%d] GPU %d: %s PCIe %d:%d:%d\n", my_rank, dev_id, prop.name, prop.pciDomainID, prop.pciBusID, prop.pciDeviceID); peer = !my_rank; MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id)); iter_count = ITER_COUNT_SMALL; if (!my_rank) { fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n", steps_per_batch, batches_inflight); fprintf(stdout, "WARNING: dumping round-trip latency!!!\n"); } /*allocating requests*/ sreq = (mp_request_t *) malloc(steps_per_batch*batches_inflight*sizeof(mp_request_t)); rreq = (mp_request_t *) malloc(steps_per_batch*(batches_inflight + 1)*sizeof(mp_request_t)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, 0)); if (!my_rank) { if (use_calc_size) { fprintf(stdout, "%10s \t %10s \t %10s \t %10s \t %10s \t %10s \n", "Size", "CalcSize", "No-async", "No-async+Kernel", "Async", "Async+Kernel"); } else { fprintf(stdout, "%10s \t %10s \t %10s \t %10s \t %10s \t %10s \n", "Size", "KernelTime", "No-async", "No-async+Kernel", "Async", "Async+Kernel"); } } if (size != 1) size = max_size = size; for (; size<=max_size; size*=2) { double latency; if (size > 1024) { iter_count = ITER_COUNT_LARGE; } buf_size = size; buf = malloc (buf_size); memset(buf, 0, buf_size); CUDA_CHECK(hipMalloc((void **)&sbuf_d, buf_size)); CUDA_CHECK(hipMemset(sbuf_d, 0, buf_size)); CUDA_CHECK(hipMalloc((void **)&rbuf_d, buf_size)); CUDA_CHECK(hipMemset(rbuf_d, 0, buf_size)); MP_CHECK(mp_register(sbuf_d, buf_size, &sreg)); MP_CHECK(mp_register(rbuf_d, buf_size, &rreg)); if (!my_rank) fprintf(stdout, "%10d", size); #if 0 if (!my_rank) fprintf(stdout, "sleeping 10s\n"); sleep(10); MPI_Barrier(MPI_COMM_WORLD); #endif /*warmup*/ latency 
= sr_exchange(MPI_COMM_WORLD, size, iter_count, 0/*kernel_size*/, 1/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, 0/*kernel_size*/, 0/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); /*Normal*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, 0/*kernel_size*/, 0/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); if (use_calc_size) kernel_size = calc_size; else kernel_size = (comm_comp_ratio > 0) ? comm_comp_ratio*(latency/2) : kernel_size; if (!my_rank) fprintf(stdout, "\t %10d", kernel_size); if (!my_rank) fprintf(stdout, "\t %8.2lf", latency, prepost_latency); //if (!my_rank) fprintf(stdout, "\t %8.2lf (%8.2lf)", latency, prepost_latency); hipProfilerStart(); if (!my_rank) { if (prof_init(&prof_normal, 10000, 10000, "10us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } if (prof_init(&prof_async, 10000, 10000, "10us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } prof_start = 1; } /*Normal + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, kernel_size, 0/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); if (!my_rank) fprintf(stdout, "\t %8.2lf ", latency, prepost_latency); //if (!my_rank) fprintf(stdout, "\t %8.2lf (%8.2lf)", latency, prepost_latency); if (!my_rank) { prof_start = 0; } hipProfilerStop(); /*Async*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, 0/*kernel_size*/, 1/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); if (!my_rank) fprintf(stdout, "\t %8.2lf ", latency, prepost_latency); //if (!my_rank) fprintf(stdout, "\t %8.2lf (%8.2lf)", latency, prepost_latency); hipProfilerStart(); if (!my_rank) { prof_start = 1; } /*Async + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, kernel_size, 1/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); if (!my_rank) fprintf(stdout, "\t %8.2lf \n", latency, prepost_latency); //if (!my_rank) fprintf(stdout, "\t %8.2lf (%8.2lf) \n", latency, prepost_latency); prof_start = 0; hipProfilerStop(); if (!my_rank && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size); if (!my_rank) { //prof_dump(&prof_normal); prof_dump(&prof_async); } mp_deregister(&sreg); mp_deregister(&rreg); CUDA_CHECK(hipFree(sbuf_d)); CUDA_CHECK(hipFree(rbuf_d)); free(buf); } CUDA_CHECK(hipStreamDestroy(stream)); free(sreq); free(rreq); mp_finalize (); MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); return 0; }
30d5feb08e4c7d09c5de0d0ff11b4c81f6131223.cu
/**** * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ****/ #include <string.h> #include <stdio.h> #include <assert.h> #include <limits.h> #include <sys/types.h> #include <unistd.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include <mpi.h> #include <gdsync.h> #include <mp.h> #include "prof.h" #define CUDA_CHECK(stmt) \ do { \ cudaError_t result = (stmt); \ if (cudaSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", \ __FILE__, __LINE__,cudaGetErrorString(result));\ exit(-1); \ } \ assert(cudaSuccess == result); \ } while (0) #define CU_CHECK(stmt) \ do { \ CUresult result = (stmt); \ if (CUDA_SUCCESS != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %d \n", \ __FILE__, __LINE__, result);\ exit(-1); \ } \ assert(CUDA_SUCCESS == result); \ } while (0) #define MP_CHECK(stmt) \ do { \ int result = (stmt); \ if (0 != result) { \ fprintf(stderr, "[%s:%d] mp call failed \n", \ __FILE__, __LINE__); \ exit(-1); \ } \ assert(0 == result); \ } while (0) int enable_debug_prints = 0; #define mp_dbg_msg(FMT, ARGS...) 
do \ { \ if (enable_debug_prints) { \ fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \ fflush(stderr); \ } \ } while(0) #define MAX_SIZE 1*1024*1024 #define ITER_COUNT_SMALL 1000 #define ITER_COUNT_LARGE 1000 struct prof prof_normal; struct prof prof_async; int prof_start = 0; int prof_idx = 0; static const int over_sub_factor = 2; int gpu_num_sm; int enable_ud = 0; int gpu_id = -1; int comm_size, my_rank, peer; int steps_per_batch = 20, batches_inflight = 4; int enable_async = 1; int calc_size = 128*1024; int use_calc_size = 1; volatile uint32_t tracking_event = 0; __device__ int counter; __device__ int clockrate; __global__ void calc_kernel(int n, float c, float *in, float *out) { const uint tid = threadIdx.x; const uint bid = blockIdx.x; const uint block_size = blockDim.x; const uint grid_size = gridDim.x; const uint gid = tid + bid*block_size; const uint n_threads = block_size*grid_size; for (int i=gid; i<n; i += n_threads) out[i] = in[i] * c; } int gpu_launch_calc_kernel(size_t size, cudaStream_t stream) { const int nblocks = over_sub_factor * gpu_num_sm; const int nthreads = 32*2; int n = size / sizeof(float); static float *in = NULL; static float *out = NULL; if (!in) { CUDA_CHECK(cudaMalloc((void **)&in, size)); CUDA_CHECK(cudaMalloc((void **)&out, size)); CUDA_CHECK(cudaMemset((void *)in, 1, size)); CUDA_CHECK(cudaMemset((void *)out, 1, size)); } calc_kernel<<<nblocks, nthreads, 0, stream>>>(n, 1.0f, in, out); CUDA_CHECK(cudaGetLastError()); return 0; } __global__ void dummy_kernel(double time) { long long int start, stop; double usec; start = clock64(); do { stop = clock64(); usec = ((double)(stop-start)*1000)/((double)clockrate); counter = usec; } while(usec < time); } /*application and pack buffers*/ void *buf = NULL, *sbuf_d = NULL, *rbuf_d = NULL; cudaStream_t stream; size_t buf_size; /*mp specific objects*/ mp_request_t *sreq = NULL; mp_request_t *rreq = NULL; mp_reg_t sreg, rreg; double time_start, time_stop; int batch_to_rreq_idx (int batch_idx) { return (batch_idx % (batches_inflight + 1))*steps_per_batch; } int batch_to_sreq_idx (int batch_idx) { return (batch_idx % batches_inflight)*steps_per_batch; } void post_recv (int size, int batch_index) { int j; int req_idx = batch_to_rreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d), size, peer, &rreg, &rreq[req_idx + j])); } } void wait_send (int batch_index) { int j; int req_idx = batch_to_sreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { MP_CHECK(mp_wait(&sreq[req_idx + j])); } } void wait_recv (int batch_index) { int j; int req_idx = batch_to_rreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { MP_CHECK(mp_wait(&rreq[req_idx + j])); } } void post_work_async (int size, int batch_index, double kernel_size) { int j; int sreq_idx = batch_to_sreq_idx (batch_index); int rreq_idx = batch_to_rreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { if (!my_rank) { MP_CHECK(mp_wait_on_stream(&rreq[rreq_idx + j], stream)); if (kernel_size > 0) { if (use_calc_size > 0) gpu_launch_calc_kernel(kernel_size, stream); else dummy_kernel <<<1, 1, 0, stream>>> (kernel_size); } MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d), size, peer, &sreg, &sreq[sreq_idx + j], stream)); } else { MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d), size, peer, &sreg, &sreq[sreq_idx + j], stream)); MP_CHECK(mp_wait_on_stream(&rreq[rreq_idx + j], stream)); if (kernel_size > 0) { if (use_calc_size > 0) 
gpu_launch_calc_kernel(kernel_size, stream); else dummy_kernel <<<1, 1, 0, stream>>> (kernel_size); } } } } void post_work_sync (int size, int batch_index, double kernel_size) { int j; int rreq_idx = batch_to_rreq_idx (batch_index); int sreq_idx = batch_to_sreq_idx (batch_index); for (j=0; j<steps_per_batch; j++) { if (!my_rank) { MP_CHECK(mp_wait(&rreq[rreq_idx + j])); if (kernel_size > 0) { if (use_calc_size > 0) gpu_launch_calc_kernel(kernel_size, stream); else dummy_kernel <<<1, 1, 0, stream>>> (kernel_size); CUDA_CHECK(cudaStreamSynchronize(stream)); } MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d), size, peer, &sreg, &sreq[sreq_idx + j])); } else { MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d), size, peer, &sreg, &sreq[sreq_idx + j])); MP_CHECK(mp_wait(&rreq[rreq_idx + j])); if (kernel_size > 0) { if (use_calc_size > 0) gpu_launch_calc_kernel(kernel_size, stream); else dummy_kernel <<<1, 1, 0, stream>>> (kernel_size); CUDA_CHECK(cudaStreamSynchronize(stream)); } } } } double prepost_latency; double sr_exchange (MPI_Comm comm, int size, int iter_count, double kernel_size, int use_async) { int j; double latency; double time_start, time_stop; int batch_count, wait_send_batch = 0, wait_recv_batch = 0; struct prof *prof = NULL; prof = (use_async) ? &prof_async : &prof_normal; assert((iter_count%steps_per_batch) == 0); batch_count = iter_count/steps_per_batch; tracking_event = 0; post_recv (size, 0); MPI_Barrier(MPI_COMM_WORLD); time_start = MPI_Wtime(); for (j=0; (j<batches_inflight) && (j<batch_count); j++) { if (j<(batch_count-1)) { post_recv (size, j+1); } if (use_async) { post_work_async (size, j, kernel_size); } else { post_work_sync (size, j, kernel_size); } } time_stop = MPI_Wtime(); prepost_latency = ((time_stop - time_start)*1e6); time_start = MPI_Wtime(); wait_send_batch = wait_recv_batch = 0; prof_idx = 0; while (wait_send_batch < batch_count) { if (!my_rank && prof_start) PROF(prof, prof_idx++); if (use_async) { wait_recv (wait_recv_batch); wait_recv_batch++; } if (!my_rank && prof_start) PROF(prof, prof_idx++); wait_send (wait_send_batch); wait_send_batch++; if (!my_rank && prof_start) PROF(prof, prof_idx++); if (j < (batch_count-1)) { post_recv (size, j+1); } if (!my_rank && prof_start) PROF(prof, prof_idx++); if (j < batch_count) { if (use_async) { post_work_async (size, j, kernel_size); } else { post_work_sync (size, j, kernel_size); } } if (!my_rank && prof_start) { PROF(prof, prof_idx++); prof_update(prof); prof_idx = 0; } j++; } MPI_Barrier(comm); time_stop = MPI_Wtime(); latency = (((time_stop - time_start)*1e6 + prepost_latency)/(iter_count)); CUDA_CHECK(cudaDeviceSynchronize()); return latency; } int main (int argc, char *argv[]) { int iter_count, max_size, size, dev_count, local_rank, dev_id = 0; int kernel_size = 20; int comm_comp_ratio = 0; int validate = 0; size = 1; max_size = MAX_SIZE; char *value = getenv("ENABLE_VALIDATION"); if (value != NULL) { validate = atoi(value); } value = getenv("ENABLE_DEBUG_MSG"); if (value != NULL) { enable_debug_prints = atoi(value); } value = getenv("KERNEL_TIME"); if (value != NULL) { kernel_size = atoi(value); } value = getenv("COMM_COMP_RATIO"); if (value != NULL) { comm_comp_ratio = atoi(value); } value = getenv("CALC_SIZE"); if (value != NULL) { calc_size = atoi(value); } use_calc_size = 1; value = getenv("USE_CALC_SIZE"); if (value != NULL) { use_calc_size = atoi(value); } value = getenv("STEPS_PER_BATCH"); if (value != NULL) { steps_per_batch = atoi(value); } value = getenv("BATCHES_INFLIGHT"); if (value != NULL) { 
batches_inflight = atoi(value); } value = getenv("SIZE"); if (value != NULL) { size = atoi(value); } value = getenv("MP_ENABLE_UD"); if (value != NULL) { enable_ud = atoi(value); } if (enable_ud) { if (max_size > 4096) { max_size = 4096; } } while(1) { int c; c = getopt(argc, argv, "d:h"); if (c == -1) break; switch(c) { case 'd': gpu_id = strtol(optarg, NULL, 0); break; case 'h': printf("syntax: %s [-d <gpu_id]\n", argv[0]); break; default: printf("ERROR: invalid option\n"); exit(EXIT_FAILURE); } } char *tags = "wait_recv|wait_send|post_recv|post_work"; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &comm_size); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (comm_size != 2) { fprintf(stderr, "this test requires exactly two processes \n"); exit(-1); } CUDA_CHECK(cudaGetDeviceCount(&dev_count)); if (dev_count <= 0) { fprintf(stderr, "no CUDA devices found \n"); exit(-1); } if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK")); } else if (getenv("OMPI_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("OMPI_COMM_WORLD_LOCAL_RANK")); } else { local_rank = 0; } if (gpu_id >= 0) { dev_id = gpu_id; } else if (getenv("USE_GPU")) { dev_id = atoi(getenv("USE_GPU")); } else { dev_id = local_rank%dev_count; } if (dev_id >= dev_count) { fprintf(stderr, "invalid dev_id=%d\n", dev_id); exit(-1); } fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d\n", my_rank, local_rank, dev_count, dev_id); CUDA_CHECK(cudaSetDevice(dev_id)); CUDA_CHECK(cudaFree(0)); cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, dev_id)); CUDA_CHECK(cudaMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, cudaMemcpyHostToDevice)); gpu_num_sm = prop.multiProcessorCount; fprintf(stdout, "[%d] GPU %d: %s PCIe %d:%d:%d\n", my_rank, dev_id, prop.name, prop.pciDomainID, prop.pciBusID, prop.pciDeviceID); peer = !my_rank; MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id)); iter_count = ITER_COUNT_SMALL; if (!my_rank) { fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n", steps_per_batch, batches_inflight); fprintf(stdout, "WARNING: dumping round-trip latency!!!\n"); } /*allocating requests*/ sreq = (mp_request_t *) malloc(steps_per_batch*batches_inflight*sizeof(mp_request_t)); rreq = (mp_request_t *) malloc(steps_per_batch*(batches_inflight + 1)*sizeof(mp_request_t)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, 0)); if (!my_rank) { if (use_calc_size) { fprintf(stdout, "%10s \t %10s \t %10s \t %10s \t %10s \t %10s \n", "Size", "CalcSize", "No-async", "No-async+Kernel", "Async", "Async+Kernel"); } else { fprintf(stdout, "%10s \t %10s \t %10s \t %10s \t %10s \t %10s \n", "Size", "KernelTime", "No-async", "No-async+Kernel", "Async", "Async+Kernel"); } } if (size != 1) size = max_size = size; for (; size<=max_size; size*=2) { double latency; if (size > 1024) { iter_count = ITER_COUNT_LARGE; } buf_size = size; buf = malloc (buf_size); memset(buf, 0, buf_size); CUDA_CHECK(cudaMalloc((void **)&sbuf_d, buf_size)); CUDA_CHECK(cudaMemset(sbuf_d, 0, buf_size)); CUDA_CHECK(cudaMalloc((void **)&rbuf_d, buf_size)); CUDA_CHECK(cudaMemset(rbuf_d, 0, buf_size)); MP_CHECK(mp_register(sbuf_d, buf_size, &sreg)); MP_CHECK(mp_register(rbuf_d, buf_size, &rreg)); if (!my_rank) fprintf(stdout, "%10d", size); #if 0 if (!my_rank) fprintf(stdout, "sleeping 10s\n"); sleep(10); MPI_Barrier(MPI_COMM_WORLD); #endif /*warmup*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, 0/*kernel_size*/, 1/*use_async*/); 
MPI_Barrier(MPI_COMM_WORLD); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, 0/*kernel_size*/, 0/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); /*Normal*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, 0/*kernel_size*/, 0/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); if (use_calc_size) kernel_size = calc_size; else kernel_size = (comm_comp_ratio > 0) ? comm_comp_ratio*(latency/2) : kernel_size; if (!my_rank) fprintf(stdout, "\t %10d", kernel_size); if (!my_rank) fprintf(stdout, "\t %8.2lf", latency, prepost_latency); //if (!my_rank) fprintf(stdout, "\t %8.2lf (%8.2lf)", latency, prepost_latency); cudaProfilerStart(); if (!my_rank) { if (prof_init(&prof_normal, 10000, 10000, "10us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } if (prof_init(&prof_async, 10000, 10000, "10us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } prof_start = 1; } /*Normal + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, kernel_size, 0/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); if (!my_rank) fprintf(stdout, "\t %8.2lf ", latency, prepost_latency); //if (!my_rank) fprintf(stdout, "\t %8.2lf (%8.2lf)", latency, prepost_latency); if (!my_rank) { prof_start = 0; } cudaProfilerStop(); /*Async*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, 0/*kernel_size*/, 1/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); if (!my_rank) fprintf(stdout, "\t %8.2lf ", latency, prepost_latency); //if (!my_rank) fprintf(stdout, "\t %8.2lf (%8.2lf)", latency, prepost_latency); cudaProfilerStart(); if (!my_rank) { prof_start = 1; } /*Async + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, kernel_size, 1/*use_async*/); MPI_Barrier(MPI_COMM_WORLD); if (!my_rank) fprintf(stdout, "\t %8.2lf \n", latency, prepost_latency); //if (!my_rank) fprintf(stdout, "\t %8.2lf (%8.2lf) \n", latency, prepost_latency); prof_start = 0; cudaProfilerStop(); if (!my_rank && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size); if (!my_rank) { //prof_dump(&prof_normal); prof_dump(&prof_async); } mp_deregister(&sreg); mp_deregister(&rreg); CUDA_CHECK(cudaFree(sbuf_d)); CUDA_CHECK(cudaFree(rbuf_d)); free(buf); } CUDA_CHECK(cudaStreamDestroy(stream)); free(sreq); free(rreq); mp_finalize (); MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); return 0; }
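The request bookkeeping in the benchmark above maps a batch index onto fixed-size rings of requests; with the default steps_per_batch = 20 and batches_inflight = 4, the arithmetic works out as in the worked example below (the sizes follow directly from the malloc calls in main()).

// Worked example of the request-ring indexing (defaults: steps_per_batch = 20,
// batches_inflight = 4):
//   sreq holds 20 * 4 = 80 requests,   batch_to_sreq_idx(b) = (b % 4) * 20
//   rreq holds 20 * 5 = 100 requests,  batch_to_rreq_idx(b) = (b % 5) * 20
// The extra receive slot is what lets post_recv() pre-post batch b+1 while
// batch b is still being waited on, without reusing a request that is in flight.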
b13c5a4b007629d5af10b78695b370feee2c02f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Thinning.cu // #include "Thinning.h" #include <iostream> #include <stdio.h> using namespace std; // DEF_BLOCK_X DEF_BLOCK_Y // #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 #define uchar unsigned char #define HIGH 255 #define LOW 0 static __global__ void _thinDP1Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) { // dstc dstr x y c // columnr row int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // // if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // , if (*outptr != LOW) { // 8 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; // p1 p2 p3 // p4 p5 // p6 p7 p8 unsigned char x1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char x2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char x3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char x4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char x5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char x6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char x7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char x8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; if ((x4 == HIGH && x5 == LOW) && !((x2 == LOW && x3 == HIGH) || (x7 == LOW && x8 == HIGH)) && !(x1 == LOW && x2 == LOW && x3 == LOW && x6 == LOW && x7 == LOW && x8 == LOW)){ outimg.imgMeta.imgData[curpos] = LOW; // devchangecount 1 *devchangecount = 1; } } } static __global__ void _thinDP2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) { // dstc dstr x y c // columnr row int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // // if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // , if (*outptr != LOW) { // 8 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char x1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char x2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char x3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char x4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char x5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char x6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char x7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char x8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; if (x7 == HIGH && x2 == LOW && !((x4 == LOW && x1 == HIGH) || (x5 == LOW && x3 == HIGH)) && !(x1 == LOW && x4 == LOW && x6 == LOW && x3 == LOW && x5 == LOW && x8 == LOW)){ outimg.imgMeta.imgData[curpos] = LOW; // devchangecount 1 *devchangecount = 1; } } } static __global__ void _thinDP3Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) { // dstc dstr x y c // columnr row int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // // if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // 
unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // , if (*outptr != LOW) { // 8 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char x1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char x2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char x3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char x4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char x5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char x6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char x7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char x8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; if (x5 == HIGH && x4 == LOW && !((x2 == LOW && x1 == HIGH) || (x7 == LOW && x6 == HIGH)) && !(x1 == LOW && x2 == LOW && x3 == LOW && x6 == LOW && x7 == LOW && x8 == LOW)) { outimg.imgMeta.imgData[curpos] = LOW; // devchangecount 1 *devchangecount = 1; } } } static __global__ void _thinDP4Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) { // dstc dstr x y c // columnr row int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // // if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // , if (*outptr != LOW) { // 8 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char x1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char x2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char x3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char x4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char x5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char x6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char x7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char x8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; if (x2 == HIGH && x7 == LOW && !((x4 == LOW && x6 == HIGH) || (x5 == LOW && x8 == HIGH)) && !(x1 == LOW && x4 == LOW && x6 == LOW && x3 == LOW && x5 == LOW && x8 == LOW)) { outimg.imgMeta.imgData[curpos] = LOW; // devchangecount 1 *devchangecount = 1; } } } // // __host__ int Thinning::thinDP(Image *inimg, Image *outimg) { // int errcode; hipError_t cudaerrcode; // if (inimg == NULL || outimg == NULL) return NULL_POINTER; // Image *tempimg = NULL; int *devchangecount = NULL; // host int changeCount; // device cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } // inimg outimg outimg // device errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; 
return errcode; } // Kernel dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 1 changeCount = 1; // changeCount 0 // while (changeCount > 0) { // host 0 device devchangecount changeCount = 0; cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // hipLaunchKernelGGL(( _thinDP1Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount); if (hipGetLastError() != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // hipLaunchKernelGGL(( _thinDP2Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount); if (hipGetLastError() != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // hipLaunchKernelGGL(( _thinDP3Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount); if (hipGetLastError() != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // hipLaunchKernelGGL(( _thinDP4Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount); if (hipGetLastError() != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // device devchangecount host changeCount // cudaerrcode = hipMemcpy(&changeCount, devchangecount, sizeof (int), hipMemcpyDeviceToHost); if (cudaerrcode != hipSuccess) { // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } } // hipFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } // static __global__ void _thinDPFour1Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) // { // // dstc dstr x y c // // columnr row // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // // // // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // // // unsigned char *outptr; // // // int curpos = dstr * tempimg.pitchBytes + dstc; // // // outptr = tempimg.imgMeta.imgData + curpos; // // , // if (*outptr != LOW) { // // 8 // // // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1] == HIGH; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1] == HIGH; // unsigned char p3 = 
tempimg.imgMeta.imgData[dstc+1 + posColumn1] == HIGH; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2] == HIGH; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3] == HIGH; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3] == HIGH; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3] == HIGH; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2] == HIGH; // int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) + // (p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) + // (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) + // (p8 == 0 && p1 == 1) + (p1 == 0 && p2 == 1); // int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p1; // int m1 = (p2 * p4 * p6); // int m2 = (p4 * p6 * p8); // if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) { // outimg.imgMeta.imgData[curpos] = LOW; // // devchangecount 1 // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= outimg.imgMeta.height - 1) // return ; // curpos += outimg.pitchBytes; // // // outptr = outimg.imgMeta.imgData + curpos; // if (*outptr != LOW) { // // 8 // // // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1] == HIGH; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1] == HIGH; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1] == HIGH; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2] == HIGH; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3] == HIGH; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3] == HIGH; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3] == HIGH; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2] == HIGH; // int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) + // (p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) + // (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) + // (p8 == 0 && p1 == 1) + (p1 == 0 && p2 == 1); // int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p1; // int m1 = (p2 * p4 * p6); // int m2 = (p4 * p6 * p8); // if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) { // outimg.imgMeta.imgData[curpos] = LOW; // // devchangecount 1 // *devchangecount = 1; // } // } // } // } // static __global__ void _thinDPFour2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) // { // // dstc dstr x y c // // columnr row // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // // // // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // // // unsigned char *outptr; // // // int curpos = dstr * tempimg.pitchBytes + dstc; // // // outptr = tempimg.imgMeta.imgData + curpos; // // , // if (*outptr != LOW) { // // 8 // // // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1] == HIGH; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1] == HIGH; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1] == HIGH; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2] == HIGH; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3] == HIGH; // unsigned 
char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3] == HIGH; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3] == HIGH; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2] == HIGH; // int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) + // (p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) + // (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) + // (p8 == 0 && p1 == 1) + (p1 == 0 && p2 == 1); // int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p1; // int m1 = (p2 * p4 * p8); // int m2 = (p2 * p6 * p8); // if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) { // outimg.imgMeta.imgData[curpos] = LOW; // // devchangecount 1 // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= outimg.imgMeta.height - 1) // return ; // curpos += outimg.pitchBytes; // // // outptr = outimg.imgMeta.imgData + curpos; // if (*outptr != LOW) { // // 8 // // // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1] == HIGH; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1] == HIGH; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1] == HIGH; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2] == HIGH; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3] == HIGH; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3] == HIGH; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3] == HIGH; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2] == HIGH; // int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) + // (p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) + // (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) + // (p8 == 0 && p1 == 1) + (p1 == 0 && p2 == 1); // int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p1; // int m1 = (p2 * p4 * p8); // int m2 = (p2 * p6 * p8); // if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) { // outimg.imgMeta.imgData[curpos] = LOW; // // devchangecount 1 // *devchangecount = 1; // } // } // } // } // // // // // __host__ int Thinning::thinDPFour(Image *inimg, Image *outimg) // { // // // int errcode; // hipError_t cudaerrcode; // // // if (inimg == NULL || outimg == NULL) // return NULL_POINTER; // // // Image *tempimg = NULL; // int *devchangecount = NULL; // // host // int changeCount; // // device // cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // // // errcode = ImageBasicOp::newImage(&tempimg); // if (errcode != NO_ERROR) // return errcode; // errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, // inimg->height); // if (errcode != NO_ERROR) { // return errcode; // } // // inimg outimg outimg // // device // errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // // ImageCuda outsubimgCud; // errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // // ImageCuda tempsubimgCud; // errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // Kernel // dim3 gridsize, blocksize; // blocksize.x = DEF_BLOCK_X; // blocksize.y = DEF_BLOCK_Y; // gridsize.x = (outsubimgCud.imgMeta.width + 
blocksize.x - 1) / blocksize.x; // gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y*4 - 1) / blocksize.y*4; // // 1 // changeCount = 1; // // changeCount 0 // // // while (changeCount > 0) { // // host 0 device devchangecount // changeCount = 0; // cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), // hipMemcpyHostToDevice); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // // copy ouimg to tempimg // cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // // // _thinDPFour1Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount); // if (hipGetLastError() != hipSuccess) { // // // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // // copy ouimg to tempimg // cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // // // _thinDPFour2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount); // if (hipGetLastError() != hipSuccess) { // // // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // // device devchangecount host changeCount // // // cudaerrcode = hipMemcpy(&changeCount, devchangecount, sizeof (int), // hipMemcpyDeviceToHost); // if (cudaerrcode != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // } // // // hipFree(devchangecount); // ImageBasicOp::deleteImage(tempimg); // return NO_ERROR; // } static __global__ void _thinDPPt1Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, uchar *dev_lut) { // dstc dstr x y c // columnr row int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // // if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // , if (*outptr != LOW) { // 8 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char p4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char p6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char p7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char p8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; if (dev_lut[index]) { outimg.imgMeta.imgData[curpos] = LOW; // devchangecount 1 *devchangecount = 1; } } } static __global__ void _thinDPPt2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, uchar *dev_lut) { // dstc dstr x y c // columnr row int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // // if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = 
tempimg.imgMeta.imgData + curpos; // , if (*outptr != LOW) { // 8 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char p4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char p6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char p7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char p8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; if (dev_lut[index + 256]) { outimg.imgMeta.imgData[curpos] = LOW; // devchangecount 1 *devchangecount = 1; } } } static __global__ void _thinDPPt3Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, uchar *dev_lut) { // dstc dstr x y c // columnr row int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // // if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // , if (*outptr != LOW) { // 8 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char p4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char p6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char p7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char p8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; if (dev_lut[index + 512]) { outimg.imgMeta.imgData[curpos] = LOW; // devchangecount 1 *devchangecount = 1; } } } static __global__ void _thinDPPt4Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, uchar *dev_lut) { // dstc dstr x y c // columnr row int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // // if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // , if (*outptr != LOW) { // 8 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char p4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char p6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char p7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char p8 = 
tempimg.imgMeta.imgData[dstc+1 + posColumn3]; uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; if (dev_lut[index + 768]) { outimg.imgMeta.imgData[curpos] = LOW; // devchangecount 1 *devchangecount = 1; } } } __host__ int Thinning::thinDPPt(Image *inimg, Image *outimg) { // int errcode; hipError_t cudaerrcode; // if (inimg == NULL || outimg == NULL) return NULL_POINTER; // Image *tempimg = NULL; int *devchangecount = NULL; // host int changeCount; // device cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } uchar lut[1024] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; uchar *dev_lut; cudaerrcode = hipMalloc((void **)&dev_lut, sizeof (uchar) * 1024); if (cudaerrcode != hipSuccess) return CUDA_ERROR; cudaerrcode = hipMemcpy(dev_lut, lut, sizeof (uchar) * 1024, hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) return CUDA_ERROR; // errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } // inimg outimg outimg // device errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // Kernel dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 1 changeCount = 1; // changeCount 0 // while (changeCount > 0) { // host 0 device devchangecount changeCount = 0; cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // hipLaunchKernelGGL(( _thinDPPt1Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount, dev_lut); if (hipGetLastError() != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // hipLaunchKernelGGL(( _thinDPPt2Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount, dev_lut); if (hipGetLastError() != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // hipLaunchKernelGGL(( _thinDPPt3Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount, dev_lut); if (hipGetLastError() != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } // hipLaunchKernelGGL(( _thinDPPt4Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount, dev_lut); if (hipGetLastError() != hipSuccess) { // // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // device devchangecount host changeCount // cudaerrcode = 
hipMemcpy(&changeCount, devchangecount, sizeof (int), hipMemcpyDeviceToHost); if (cudaerrcode != hipSuccess) { // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } } // hipFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } // static __global__ void _thinDPPtFour1Ker(ImageCuda tempimg, ImageCuda outimg, // int *devchangecount, uchar *dev_lut) // { // // dstc dstr x y c // // columnr row // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // // // // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // // // unsigned char *outptr; // // // int curpos = dstr * tempimg.pitchBytes + dstc; // // // outptr = tempimg.imgMeta.imgData + curpos; // // , // if (*outptr != LOW) { // // 8 // // // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3]; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; // uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + // (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; // if (dev_lut[index]) { // outimg.imgMeta.imgData[curpos] = LOW; // // devchangecount 1 // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= outimg.imgMeta.height - 1) // return ; // curpos += outimg.pitchBytes; // // // outptr = outimg.imgMeta.imgData + curpos; // if (*outptr != LOW) { // // 8 // // // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3]; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; // uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + // (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; // if (dev_lut[index]) { // outimg.imgMeta.imgData[curpos] = LOW; // // devchangecount 1 // *devchangecount = 1; // } // } // } // } // static __global__ void _thinDPPtFour2Ker(ImageCuda tempimg, ImageCuda outimg, // int *devchangecount, uchar *dev_lut) // { // // dstc dstr x y c // // columnr row // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // // // // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // // // unsigned char *outptr; // // // int curpos = dstr * 
tempimg.pitchBytes + dstc; // // // outptr = tempimg.imgMeta.imgData + curpos; // // , // if (*outptr != LOW) { // // 8 // // // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3]; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; // uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + // (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; // if (dev_lut[index + 256]) { // outimg.imgMeta.imgData[curpos] = LOW; // // devchangecount 1 // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= outimg.imgMeta.height - 1) // return ; // curpos += outimg.pitchBytes; // // // outptr = outimg.imgMeta.imgData + curpos; // if (*outptr != LOW) { // // 8 // // // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3]; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; // uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + // (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; // if (dev_lut[index + 256]) { // outimg.imgMeta.imgData[curpos] = LOW; // // devchangecount 1 // *devchangecount = 1; // } // } // } // } // __host__ int Thinning::thinDPPtFour(Image *inimg, Image *outimg) // { // // // int errcode; // hipError_t cudaerrcode; // // // if (inimg == NULL || outimg == NULL) // return NULL_POINTER; // // // Image *tempimg = NULL; // int *devchangecount = NULL; // // host // int changeCount; // // device // cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // uchar lut[512] = // { // 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, // 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0 // }; // uchar *dev_lut; // cudaerrcode = hipMalloc((void **)&dev_lut, sizeof (uchar) * 512); // if (cudaerrcode != hipSuccess) // return CUDA_ERROR; // cudaerrcode = hipMemcpy(dev_lut, lut, sizeof (uchar) * 512, // hipMemcpyHostToDevice); // if (cudaerrcode != hipSuccess) // return CUDA_ERROR; // // // errcode = ImageBasicOp::newImage(&tempimg); // if (errcode != NO_ERROR) // return errcode; // errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, // inimg->height); // if (errcode != NO_ERROR) { // return errcode; // } // // inimg outimg outimg // // device // errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // // ImageCuda outsubimgCud; // errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // // ImageCuda tempsubimgCud; // errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // Kernel // dim3 gridsize, blocksize; // blocksize.x = DEF_BLOCK_X; // blocksize.y = DEF_BLOCK_Y; // gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; // gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; // // 1 // changeCount = 1; // // changeCount 0 // // // while (changeCount > 0) { // // host 0 device devchangecount // changeCount = 0; // cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), // hipMemcpyHostToDevice); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // // copy ouimg to tempimg // cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // // // _thinDPPtFour1Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); // if (hipGetLastError() != hipSuccess) { // // // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // // copy ouimg to tempimg // cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // // // _thinDPPtFour2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); // if (hipGetLastError() != hipSuccess) { // // // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // // device devchangecount host changeCount // // // cudaerrcode = hipMemcpy(&changeCount, devchangecount, sizeof (int), // 
hipMemcpyDeviceToHost);
//         if (cudaerrcode != hipSuccess) {
//             // FAIL_THIN_IMAGE_FREE;
//             return CUDA_ERROR;
//         }
//     }
//     // Free the allocated buffers once thinning has finished.
//     hipFree(devchangecount);
//     ImageBasicOp::deleteImage(tempimg);
//     return NO_ERROR;
// }
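// ---------------------------------------------------------------------------
// Illustrative helper (a hypothetical sketch, not part of the original
// kernels): the _thinDPPt*Ker kernels above index a 1024-entry lookup table
// with an 8-bit code packed from the 8-neighborhood (p1..p8 weighted
// 1, 2, 4, ..., 128), plus a 256-entry offset per sub-iteration
// (0, 256, 512, 768). The host-side function below only mirrors that
// encoding so the dev_lut indexing convention is documented in one place.
static inline unsigned char packNeighborhood(const unsigned char n[8])
{
    // n[0..7] correspond to p1..p8 in the kernels; HIGH marks object pixels.
    unsigned char index = 0;
    for (int i = 0; i < 8; ++i) {
        if (n[i] == HIGH)
            index |= (unsigned char)(1 << i);
    }
    // dev_lut[index + 256 * step] decides deletion in sub-iteration 'step'.
    return index;
}
// ---------------------------------------------------------------------------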
b13c5a4b007629d5af10b78695b370feee2c02f9.cu
// Thinning.cu
// Implements thinning (skeletonization) of binary images.

#include "Thinning.h"
#include <iostream>
#include <stdio.h>
using namespace std;

// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread-block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8

#define uchar unsigned char
#define HIGH 255
#define LOW 0

static __global__ void _thinDP1Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount)
{
    // dstc and dstr are the x and y components of the coordinate of the pixel
    // handled by this thread (c stands for column, r stands for row).
    int dstc = blockIdx.x * blockDim.x + threadIdx.x;
    int dstr = blockIdx.y * blockDim.y + threadIdx.y;

    // Skip pixels that fall outside the processable region; this saves
    // computation and prevents out-of-bounds accesses from crashing the
    // program.
    if (dstc >= tempimg.imgMeta.width - 1 ||
        dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1)
        return;

    // Pointer to the target pixel.
    unsigned char *outptr;

    // Offset of the current pixel within the image.
    int curpos = dstr * tempimg.pitchBytes + dstc;

    // Absolute address of the current pixel in the image.
    outptr = tempimg.imgMeta.imgData + curpos;

    // If the target pixel is LOW (background), no thinning is applied to it.
    if (*outptr != LOW) {
        // The image is stored linearly, so the offsets of the three rows of
        // the 8-neighborhood are computed once here to avoid repeating the
        // calculation below.
        int posColumn1 = (dstr - 1) * tempimg.pitchBytes;
        int posColumn2 = posColumn1 + tempimg.pitchBytes;
        int posColumn3 = posColumn2 + tempimg.pitchBytes;

        // p1 p2 p3
        // p4    p5
        // p6 p7 p8
        unsigned char x1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1];
        unsigned char x2 = tempimg.imgMeta.imgData[dstc+ posColumn1];
        unsigned char x3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1];
        unsigned char x4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2];
        unsigned char x5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2];
        unsigned char x6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3];
        unsigned char x7 = tempimg.imgMeta.imgData[dstc+ posColumn3];
        unsigned char x8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3];

        if ((x4 == HIGH && x5 == LOW) &&
            !((x2 == LOW && x3 == HIGH) || (x7 == LOW && x8 == HIGH)) &&
            !(x1 == LOW && x2 == LOW && x3 == LOW &&
              x6 == LOW && x7 == LOW && x8 == LOW)) {
            outimg.imgMeta.imgData[curpos] = LOW;
            // Set devchangecount, which records that points were deleted, to 1.
            *devchangecount = 1;
        }
    }
}

static __global__ void _thinDP2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount)
{
    // dstc and dstr are the x and y components of the coordinate of the pixel
    // handled by this thread (c stands for column, r stands for row).
    int dstc = blockIdx.x * blockDim.x + threadIdx.x;
    int dstr = blockIdx.y * blockDim.y + threadIdx.y;

    // Skip pixels that fall outside the processable region; this saves
    // computation and prevents out-of-bounds accesses from crashing the
    // program.
    if (dstc >= tempimg.imgMeta.width - 1 ||
        dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1)
        return;

    // Pointer to the target pixel.
    unsigned char *outptr;

    // Offset of the current pixel within the image.
    int curpos = dstr * tempimg.pitchBytes + dstc;

    // Absolute address of the current pixel in the image.
    outptr = tempimg.imgMeta.imgData + curpos;

    // If the target pixel is LOW (background), no thinning is applied to it.
    if (*outptr != LOW) {
        // The image is stored linearly, so the offsets of the three rows of
        // the 8-neighborhood are computed once here to avoid repeating the
        // calculation below.
        int posColumn1 = (dstr - 1) * tempimg.pitchBytes;
        int posColumn2 = posColumn1 + tempimg.pitchBytes;
        int posColumn3 = posColumn2 + tempimg.pitchBytes;

        unsigned char x1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1];
        unsigned char x2 = tempimg.imgMeta.imgData[dstc+ posColumn1];
        unsigned char x3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1];
        unsigned char x4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2];
        unsigned char x5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2];
        unsigned char x6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3];
        unsigned char x7 = tempimg.imgMeta.imgData[dstc+ posColumn3];
        unsigned char x8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3];

        if (x7 == HIGH && x2 == LOW &&
            !((x4 == LOW && x1 == HIGH) || (x5 == LOW && x3 == HIGH)) &&
            !(x1 == LOW && x4 == LOW && x6 == LOW &&
              x3 == LOW && x5 == LOW && x8 == LOW)) {
            outimg.imgMeta.imgData[curpos] = LOW;
            // 记录删除点数的
devchangecount 值加 1 。 *devchangecount = 1; } } } static __global__ void _thinDP3Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // column,r 表示 row )。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 另一方面防止由于段错误导致程序崩溃。 if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // 定义目标点位置的指针。 unsigned char *outptr; // 获取当前像素点在图像中的相对位置。 int curpos = dstr * tempimg.pitchBytes + dstc; // 获取当前像素点在图像中的绝对位置。 outptr = tempimg.imgMeta.imgData + curpos; // 如果目标像素点的像素值为低像素, 则不进行细化处理。 if (*outptr != LOW) { // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // 防止下面细化处理时重复计算。 int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char x1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char x2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char x3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char x4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char x5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char x6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char x7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char x8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; if (x5 == HIGH && x4 == LOW && !((x2 == LOW && x1 == HIGH) || (x7 == LOW && x6 == HIGH)) && !(x1 == LOW && x2 == LOW && x3 == LOW && x6 == LOW && x7 == LOW && x8 == LOW)) { outimg.imgMeta.imgData[curpos] = LOW; // 记录删除点数的 devchangecount 值加 1 。 *devchangecount = 1; } } } static __global__ void _thinDP4Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // column,r 表示 row )。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 另一方面防止由于段错误导致程序崩溃。 if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // 定义目标点位置的指针。 unsigned char *outptr; // 获取当前像素点在图像中的相对位置。 int curpos = dstr * tempimg.pitchBytes + dstc; // 获取当前像素点在图像中的绝对位置。 outptr = tempimg.imgMeta.imgData + curpos; // 如果目标像素点的像素值为低像素, 则不进行细化处理。 if (*outptr != LOW) { // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // 防止下面细化处理时重复计算。 int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char x1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char x2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char x3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char x4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char x5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char x6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char x7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char x8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; if (x2 == HIGH && x7 == LOW && !((x4 == LOW && x6 == HIGH) || (x5 == LOW && x8 == HIGH)) && !(x1 == LOW && x4 == LOW && x6 == LOW && x3 == LOW && x5 == LOW && x8 == LOW)) { outimg.imgMeta.imgData[curpos] = LOW; // 记录删除点数的 devchangecount 值加 1 。 *devchangecount = 1; } } } // 直接并行化 // 线程数,处理多少个点有多少线程数 __host__ int Thinning::thinDP(Image *inimg, Image *outimg) { // 局部变量,错误码。 int errcode; cudaError_t cudaerrcode; // 检查输入图像,输出图像是否为空。 if (inimg == NULL || outimg == NULL) return 
NULL_POINTER; // 声明所有中间变量并初始化为空。 Image *tempimg = NULL; int *devchangecount = NULL; // 记录细化点数的变量,位于 host 端。 int changeCount; // 记录细化点数的变量,位于 device 端。并为其申请空间。 cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 生成暂存图像。 errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } // 将输入图像 inimg 完全拷贝到输出图像 outimg ,并将 outimg 拷贝到 // device 端。 errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // 提取输出图像 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // 提取暂存图像 ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 赋值为 1,以便开始第一次迭代。 changeCount = 1; // 开始迭代,当不可再被细化,即记录细化点数的变量 changeCount 的值为 0 时, // 停止迭代。 while (changeCount > 0) { // 将 host 端的变量赋值为 0 ,并将值拷贝到 device 端的 devchangecount。 changeCount = 0; cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 调用核函数,开始第一步细化操作。 _thinDP1Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间。 // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 调用核函数,开始第二步细化操作。 _thinDP2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间 。 // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 调用核函数,开始第二步细化操作。 _thinDP3Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间 。 // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 调用核函数,开始第二步细化操作。 _thinDP4Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间 。 // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // 将位于 device 端的 devchangecount 拷贝到 host 端上的 changeCount // 变量,进行迭代判断。 cudaerrcode = cudaMemcpy(&changeCount, 
devchangecount, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } } // 细化结束后释放申请的变量空间。 cudaFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } // static __global__ void _thinDPFour1Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) // { // // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // // column,r 表示 row )。 // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // // 另一方面防止由于段错误导致程序崩溃。 // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // // 定义目标点位置的指针。 // unsigned char *outptr; // // 获取当前像素点在图像中的相对位置。 // int curpos = dstr * tempimg.pitchBytes + dstc; // // 获取当前像素点在图像中的绝对位置。 // outptr = tempimg.imgMeta.imgData + curpos; // // 如果目标像素点的像素值为低像素, 则不进行细化处理。 // if (*outptr != LOW) { // // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // // 防止下面细化处理时重复计算。 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1] == HIGH; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1] == HIGH; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1] == HIGH; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2] == HIGH; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3] == HIGH; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3] == HIGH; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3] == HIGH; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2] == HIGH; // int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) + // (p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) + // (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) + // (p8 == 0 && p1 == 1) + (p1 == 0 && p2 == 1); // int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p1; // int m1 = (p2 * p4 * p6); // int m2 = (p4 * p6 * p8); // if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) { // outimg.imgMeta.imgData[curpos] = LOW; // // 记录删除点数的 devchangecount 值加 1 。 // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= outimg.imgMeta.height - 1) // return ; // curpos += outimg.pitchBytes; // // 获取当前像素点在图像中的绝对位置。 // outptr = outimg.imgMeta.imgData + curpos; // if (*outptr != LOW) { // // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // // 防止下面细化处理时重复计算。 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1] == HIGH; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1] == HIGH; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1] == HIGH; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2] == HIGH; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3] == HIGH; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3] == HIGH; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3] == HIGH; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2] == HIGH; // int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) + // (p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) + // (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) + // (p8 == 0 
&& p1 == 1) + (p1 == 0 && p2 == 1); // int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p1; // int m1 = (p2 * p4 * p6); // int m2 = (p4 * p6 * p8); // if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) { // outimg.imgMeta.imgData[curpos] = LOW; // // 记录删除点数的 devchangecount 值加 1 。 // *devchangecount = 1; // } // } // } // } // static __global__ void _thinDPFour2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) // { // // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // // column,r 表示 row )。 // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // // 另一方面防止由于段错误导致程序崩溃。 // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // // 定义目标点位置的指针。 // unsigned char *outptr; // // 获取当前像素点在图像中的相对位置。 // int curpos = dstr * tempimg.pitchBytes + dstc; // // 获取当前像素点在图像中的绝对位置。 // outptr = tempimg.imgMeta.imgData + curpos; // // 如果目标像素点的像素值为低像素, 则不进行细化处理。 // if (*outptr != LOW) { // // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // // 防止下面细化处理时重复计算。 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1] == HIGH; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1] == HIGH; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1] == HIGH; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2] == HIGH; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3] == HIGH; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3] == HIGH; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3] == HIGH; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2] == HIGH; // int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) + // (p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) + // (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) + // (p8 == 0 && p1 == 1) + (p1 == 0 && p2 == 1); // int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p1; // int m1 = (p2 * p4 * p8); // int m2 = (p2 * p6 * p8); // if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) { // outimg.imgMeta.imgData[curpos] = LOW; // // 记录删除点数的 devchangecount 值加 1 。 // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= outimg.imgMeta.height - 1) // return ; // curpos += outimg.pitchBytes; // // 获取当前像素点在图像中的绝对位置。 // outptr = outimg.imgMeta.imgData + curpos; // if (*outptr != LOW) { // // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // // 防止下面细化处理时重复计算。 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1] == HIGH; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1] == HIGH; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1] == HIGH; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2] == HIGH; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3] == HIGH; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3] == HIGH; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3] == HIGH; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2] == HIGH; // int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) + // (p4 == 0 && p5 == 1) 
+ (p5 == 0 && p6 == 1) + // (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) + // (p8 == 0 && p1 == 1) + (p1 == 0 && p2 == 1); // int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p1; // int m1 = (p2 * p4 * p8); // int m2 = (p2 * p6 * p8); // if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) { // outimg.imgMeta.imgData[curpos] = LOW; // // 记录删除点数的 devchangecount 值加 1 。 // *devchangecount = 1; // } // } // } // } // // 直接并行化 // // 线程数,处理多少个点有多少线程数 // __host__ int Thinning::thinDPFour(Image *inimg, Image *outimg) // { // // 局部变量,错误码。 // int errcode; // cudaError_t cudaerrcode; // // 检查输入图像,输出图像是否为空。 // if (inimg == NULL || outimg == NULL) // return NULL_POINTER; // // 声明所有中间变量并初始化为空。 // Image *tempimg = NULL; // int *devchangecount = NULL; // // 记录细化点数的变量,位于 host 端。 // int changeCount; // // 记录细化点数的变量,位于 device 端。并为其申请空间。 // cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // // 生成暂存图像。 // errcode = ImageBasicOp::newImage(&tempimg); // if (errcode != NO_ERROR) // return errcode; // errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, // inimg->height); // if (errcode != NO_ERROR) { // return errcode; // } // // 将输入图像 inimg 完全拷贝到输出图像 outimg ,并将 outimg 拷贝到 // // device 端。 // errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // 提取输出图像 // ImageCuda outsubimgCud; // errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // 提取暂存图像 // ImageCuda tempsubimgCud; // errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 // dim3 gridsize, blocksize; // blocksize.x = DEF_BLOCK_X; // blocksize.y = DEF_BLOCK_Y; // gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; // gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y*4 - 1) / blocksize.y*4; // // 赋值为 1,以便开始第一次迭代。 // changeCount = 1; // // 开始迭代,当不可再被细化,即记录细化点数的变量 changeCount 的值为 0 时, // // 停止迭代。 // while (changeCount > 0) { // // 将 host 端的变量赋值为 0 ,并将值拷贝到 device 端的 devchangecount。 // changeCount = 0; // cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), // cudaMemcpyHostToDevice); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // // copy ouimg to tempimg // cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // // 调用核函数,开始第一步细化操作。 // _thinDPFour1Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount); // if (cudaGetLastError() != cudaSuccess) { // // 核函数出错,结束迭代函数,释放申请的变量空间。 // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // // copy ouimg to tempimg // cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // // 调用核函数,开始第二步细化操作。 // _thinDPFour2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount); // if (cudaGetLastError() != cudaSuccess) { // // 核函数出错,结束迭代函数,释放申请的变量空间 。 // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // // 将位于 device 端的 devchangecount 拷贝到 host 端上的 changeCount // // 变量,进行迭代判断。 // cudaerrcode = 
cudaMemcpy(&changeCount, devchangecount, sizeof (int), // cudaMemcpyDeviceToHost); // if (cudaerrcode != cudaSuccess) { // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // } // // 细化结束后释放申请的变量空间。 // cudaFree(devchangecount); // ImageBasicOp::deleteImage(tempimg); // return NO_ERROR; // } static __global__ void _thinDPPt1Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, uchar *dev_lut) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // column,r 表示 row )。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 另一方面防止由于段错误导致程序崩溃。 if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // 定义目标点位置的指针。 unsigned char *outptr; // 获取当前像素点在图像中的相对位置。 int curpos = dstr * tempimg.pitchBytes + dstc; // 获取当前像素点在图像中的绝对位置。 outptr = tempimg.imgMeta.imgData + curpos; // 如果目标像素点的像素值为低像素, 则不进行细化处理。 if (*outptr != LOW) { // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // 防止下面细化处理时重复计算。 int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char p4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char p6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char p7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char p8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; if (dev_lut[index]) { outimg.imgMeta.imgData[curpos] = LOW; // 记录删除点数的 devchangecount 值加 1 。 *devchangecount = 1; } } } static __global__ void _thinDPPt2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, uchar *dev_lut) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // column,r 表示 row )。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 另一方面防止由于段错误导致程序崩溃。 if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // 定义目标点位置的指针。 unsigned char *outptr; // 获取当前像素点在图像中的相对位置。 int curpos = dstr * tempimg.pitchBytes + dstc; // 获取当前像素点在图像中的绝对位置。 outptr = tempimg.imgMeta.imgData + curpos; // 如果目标像素点的像素值为低像素, 则不进行细化处理。 if (*outptr != LOW) { // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // 防止下面细化处理时重复计算。 int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char p4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char p6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char p7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char p8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; if (dev_lut[index + 256]) { outimg.imgMeta.imgData[curpos] = LOW; // 
记录删除点数的 devchangecount 值加 1 。 *devchangecount = 1; } } } static __global__ void _thinDPPt3Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, uchar *dev_lut) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // column,r 表示 row )。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 另一方面防止由于段错误导致程序崩溃。 if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // 定义目标点位置的指针。 unsigned char *outptr; // 获取当前像素点在图像中的相对位置。 int curpos = dstr * tempimg.pitchBytes + dstc; // 获取当前像素点在图像中的绝对位置。 outptr = tempimg.imgMeta.imgData + curpos; // 如果目标像素点的像素值为低像素, 则不进行细化处理。 if (*outptr != LOW) { // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // 防止下面细化处理时重复计算。 int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char p4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char p6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char p7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char p8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; if (dev_lut[index + 512]) { outimg.imgMeta.imgData[curpos] = LOW; // 记录删除点数的 devchangecount 值加 1 。 *devchangecount = 1; } } } static __global__ void _thinDPPt4Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, uchar *dev_lut) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // column,r 表示 row )。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 另一方面防止由于段错误导致程序崩溃。 if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; // 定义目标点位置的指针。 unsigned char *outptr; // 获取当前像素点在图像中的相对位置。 int curpos = dstr * tempimg.pitchBytes + dstc; // 获取当前像素点在图像中的绝对位置。 outptr = tempimg.imgMeta.imgData + curpos; // 如果目标像素点的像素值为低像素, 则不进行细化处理。 if (*outptr != LOW) { // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // 防止下面细化处理时重复计算。 int posColumn1 = (dstr - 1) * tempimg.pitchBytes; int posColumn2 = posColumn1 + tempimg.pitchBytes; int posColumn3 = posColumn2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; unsigned char p4 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; unsigned char p6 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; unsigned char p7 = tempimg.imgMeta.imgData[dstc+ posColumn3]; unsigned char p8 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; if (dev_lut[index + 768]) { outimg.imgMeta.imgData[curpos] = LOW; // 记录删除点数的 devchangecount 值加 1 。 *devchangecount = 1; } } } __host__ int Thinning::thinDPPt(Image *inimg, Image *outimg) { // 局部变量,错误码。 int errcode; cudaError_t cudaerrcode; // 检查输入图像,输出图像是否为空。 if (inimg == NULL || outimg == NULL) return 
NULL_POINTER; // 声明所有中间变量并初始化为空。 Image *tempimg = NULL; int *devchangecount = NULL; // 记录细化点数的变量,位于 host 端。 int changeCount; // 记录细化点数的变量,位于 device 端。并为其申请空间。 cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } uchar lut[1024] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; uchar *dev_lut; cudaerrcode = cudaMalloc((void **)&dev_lut, sizeof (uchar) * 1024); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; cudaerrcode = cudaMemcpy(dev_lut, lut, 
sizeof (uchar) * 1024, cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; // 生成暂存图像。 errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } // 将输入图像 inimg 完全拷贝到输出图像 outimg ,并将 outimg 拷贝到 // device 端。 errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // 提取输出图像 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // 提取暂存图像 ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { // FAIL_THIN_IMAGE_FREE; return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 赋值为 1,以便开始第一次迭代。 changeCount = 1; // 开始迭代,当不可再被细化,即记录细化点数的变量 changeCount 的值为 0 时, // 停止迭代。 while (changeCount > 0) { // 将 host 端的变量赋值为 0 ,并将值拷贝到 device 端的 devchangecount。 changeCount = 0; cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 调用核函数,开始第一步细化操作。 _thinDPPt1Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间。 // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 调用核函数,开始第二步细化操作。 _thinDPPt2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间 。 // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 调用核函数,开始第二步细化操作。 _thinDPPt3Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间 。 // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // copy ouimg to tempimg cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } // 调用核函数,开始第二步细化操作。 _thinDPPt4Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间 。 // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } // 将位于 device 端的 devchangecount 拷贝到 host 端上的 changeCount // 变量,进行迭代判断。 cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // FAIL_THIN_IMAGE_FREE; return CUDA_ERROR; } } // 
细化结束后释放申请的变量空间。 cudaFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } // static __global__ void _thinDPPtFour1Ker(ImageCuda tempimg, ImageCuda outimg, // int *devchangecount, uchar *dev_lut) // { // // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // // column,r 表示 row )。 // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // // 另一方面防止由于段错误导致程序崩溃。 // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // // 定义目标点位置的指针。 // unsigned char *outptr; // // 获取当前像素点在图像中的相对位置。 // int curpos = dstr * tempimg.pitchBytes + dstc; // // 获取当前像素点在图像中的绝对位置。 // outptr = tempimg.imgMeta.imgData + curpos; // // 如果目标像素点的像素值为低像素, 则不进行细化处理。 // if (*outptr != LOW) { // // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // // 防止下面细化处理时重复计算。 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3]; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; // uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + // (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; // if (dev_lut[index]) { // outimg.imgMeta.imgData[curpos] = LOW; // // 记录删除点数的 devchangecount 值加 1 。 // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= outimg.imgMeta.height - 1) // return ; // curpos += outimg.pitchBytes; // // 获取当前像素点在图像中的绝对位置。 // outptr = outimg.imgMeta.imgData + curpos; // if (*outptr != LOW) { // // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // // 防止下面细化处理时重复计算。 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3]; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; // uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + // (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; // if (dev_lut[index]) { // outimg.imgMeta.imgData[curpos] = LOW; // // 记录删除点数的 devchangecount 值加 1 。 // *devchangecount = 1; // } // } // } // } // static __global__ void _thinDPPtFour2Ker(ImageCuda tempimg, ImageCuda outimg, // int *devchangecount, uchar *dev_lut) // { // // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 // // column,r 表示 row )。 // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // 
检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // // 另一方面防止由于段错误导致程序崩溃。 // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // // 定义目标点位置的指针。 // unsigned char *outptr; // // 获取当前像素点在图像中的相对位置。 // int curpos = dstr * tempimg.pitchBytes + dstc; // // 获取当前像素点在图像中的绝对位置。 // outptr = tempimg.imgMeta.imgData + curpos; // // 如果目标像素点的像素值为低像素, 则不进行细化处理。 // if (*outptr != LOW) { // // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // // 防止下面细化处理时重复计算。 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3]; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; // uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + // (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; // if (dev_lut[index + 256]) { // outimg.imgMeta.imgData[curpos] = LOW; // // 记录删除点数的 devchangecount 值加 1 。 // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= outimg.imgMeta.height - 1) // return ; // curpos += outimg.pitchBytes; // // 获取当前像素点在图像中的绝对位置。 // outptr = outimg.imgMeta.imgData + curpos; // if (*outptr != LOW) { // // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, // // 防止下面细化处理时重复计算。 // int posColumn1 = (dstr - 1) * tempimg.pitchBytes; // int posColumn2 = posColumn1 + tempimg.pitchBytes; // int posColumn3 = posColumn2 + tempimg.pitchBytes; // // p1 p2 p3 // // p8 p4 // // p7 p6 p5 // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + posColumn1]; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ posColumn1]; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + posColumn1]; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + posColumn2]; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + posColumn3]; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ posColumn3]; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + posColumn3]; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + posColumn2]; // uchar index= (p1==HIGH)*1 + (p2==HIGH)*2 + (p3==HIGH)*4 + (p4==HIGH)*8 + // (p5==HIGH)*16 + (p6==HIGH)*32 + (p7==HIGH)*64 + (p8==HIGH)*128; // if (dev_lut[index + 256]) { // outimg.imgMeta.imgData[curpos] = LOW; // // 记录删除点数的 devchangecount 值加 1 。 // *devchangecount = 1; // } // } // } // } // __host__ int Thinning::thinDPPtFour(Image *inimg, Image *outimg) // { // // 局部变量,错误码。 // int errcode; // cudaError_t cudaerrcode; // // 检查输入图像,输出图像是否为空。 // if (inimg == NULL || outimg == NULL) // return NULL_POINTER; // // 声明所有中间变量并初始化为空。 // Image *tempimg = NULL; // int *devchangecount = NULL; // // 记录细化点数的变量,位于 host 端。 // int changeCount; // // 记录细化点数的变量,位于 device 端。并为其申请空间。 // cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // uchar lut[512] = // { // 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 
0, 0, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, // 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0 // }; // uchar *dev_lut; // cudaerrcode = cudaMalloc((void **)&dev_lut, sizeof (uchar) * 512); // if (cudaerrcode != cudaSuccess) // return CUDA_ERROR; // cudaerrcode = cudaMemcpy(dev_lut, lut, sizeof (uchar) * 512, // cudaMemcpyHostToDevice); // if (cudaerrcode != cudaSuccess) // return CUDA_ERROR; // // 生成暂存图像。 // errcode = ImageBasicOp::newImage(&tempimg); // if (errcode != NO_ERROR) // return errcode; // errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, // inimg->height); // if (errcode != NO_ERROR) { // return errcode; // } // // 将输入图像 inimg 完全拷贝到输出图像 outimg ,并将 outimg 拷贝到 // // device 端。 // errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // 提取输出图像 // ImageCuda outsubimgCud; // errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // 提取暂存图像 // ImageCuda tempsubimgCud; // errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); // if (errcode != NO_ERROR) { // // FAIL_THIN_IMAGE_FREE; // return errcode; // } // // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 // dim3 gridsize, blocksize; // blocksize.x = DEF_BLOCK_X; // blocksize.y = DEF_BLOCK_Y; // gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; // gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; // // 赋值为 1,以便开始第一次迭代。 // changeCount = 1; // // 开始迭代,当不可再被细化,即记录细化点数的变量 changeCount 的值为 0 时, // // 停止迭代。 // while (changeCount > 0) { // // 将 host 端的变量赋值为 0 ,并将值拷贝到 device 端的 devchangecount。 // changeCount = 0; // cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), // cudaMemcpyHostToDevice); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // // copy ouimg to tempimg // cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // // 调用核函数,开始第一步细化操作。 // _thinDPPtFour1Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, 
devchangecount, dev_lut); // if (cudaGetLastError() != cudaSuccess) { // // 核函数出错,结束迭代函数,释放申请的变量空间。 // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // // copy ouimg to tempimg // cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // // 调用核函数,开始第二步细化操作。 // _thinDPPtFour2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); // if (cudaGetLastError() != cudaSuccess) { // // 核函数出错,结束迭代函数,释放申请的变量空间 。 // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // // 将位于 device 端的 devchangecount 拷贝到 host 端上的 changeCount // // 变量,进行迭代判断。 // cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), // cudaMemcpyDeviceToHost); // if (cudaerrcode != cudaSuccess) { // // FAIL_THIN_IMAGE_FREE; // return CUDA_ERROR; // } // } // // 细化结束后释放申请的变量空间。 // cudaFree(devchangecount); // ImageBasicOp::deleteImage(tempimg); // return NO_ERROR; // }
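For reference, the pass kernels in this file select deletable pixels by packing the 8-neighbourhood (laid out p1 p2 p3 / p8 · p4 / p7 p6 p5, as shown in the commented-out variants above) into a byte and indexing the lookup table with it; the two-pass variant reads dev_lut[index] and dev_lut[index + 256], and the four-pass version presumably uses one 256-entry slice of the 1024-entry table per pass. Below is a minimal device-side sketch of that lookup under those assumptions; HIGH is the binary foreground level used elsewhere in this file, while shouldDeleteByLut and passOffset are illustrative names that do not appear in the original source.

// Hedged sketch, not part of the original source.
__device__ inline bool shouldDeleteByLut(const unsigned char *dev_lut, int passOffset,
                                         unsigned char p1, unsigned char p2,
                                         unsigned char p3, unsigned char p4,
                                         unsigned char p5, unsigned char p6,
                                         unsigned char p7, unsigned char p8)
{
    // Bit i of the index is set when neighbour p(i+1) is a foreground (HIGH) pixel.
    unsigned char index = (p1 == HIGH) * 1  + (p2 == HIGH) * 2  +
                          (p3 == HIGH) * 4  + (p4 == HIGH) * 8  +
                          (p5 == HIGH) * 16 + (p6 == HIGH) * 32 +
                          (p7 == HIGH) * 64 + (p8 == HIGH) * 128;
    // A non-zero LUT entry marks the centre pixel as deletable in this pass
    // (passOffset would be 0, 256, 512 or 768 for the four passes, by analogy
    // with the two-pass variant that reads dev_lut[index] and dev_lut[index + 256]).
    return dev_lut[passOffset + index] != 0;
}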
94e31f1abccd74e05658bee5bee2d425dc41517a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "definitions.h" #include "kernel.h" #include "MatInvLib.h" #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void kernel_calCRLB(float *ParamF, float *ParamVar, int Nfit) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; int s, k; float FisherM[NPL*NPL]; float LowerBi[NPL*NPL]; float DiagLowerBi[NPL]; for (k = 0; k < NPL*NPL; k++) FisherM[k] = 0; for (k = 0; k < NPL*NPL; k++) { for (s = 0; s < NCH; s++) //Edited by FX FisherM[k] += ParamF[s*NPL*NPL*Nfit + j*NPL*NPL + k]; } kernel_MatInvN(FisherM, LowerBi, DiagLowerBi, NPL); for (k = 0; k < NPL; k++) ParamVar[j*NPL + k] = DiagLowerBi[k]; } __global__ void kernel_calFisherM(float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, float *gainR, float *ParamF, int Q, int Nfit, int PSFSize){ const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; int t, k, i, s; float PSFa0; float funFi1[NPL]; float tmp1; float FisherM[NPL*NPL]; float w; for (i = 0; i < NPL*NPL; i++) FisherM[i] = 0; for (i = 0; i < PSFSize; i++) { PSFa0 = PSF[j*PSFSize + i] * I[j] + bg[j] + gainR[j*PSFSize + i]; //x funFi1[0] = dPSFx[j*PSFSize + i] * I[j]; //y funFi1[1] = dPSFy[j*PSFSize + i] * I[j]; //z funFi1[2] = dPSFz[j*PSFSize + i] * I[j]; for (s = 0; s < NCH; s++) //edited by FX { w = (Q == s ? 1.0 : 0.0); // I funFi1[3 + s] = PSF[j*PSFSize + i] * w; // bg funFi1[3 + NCH + s] = w; } for (t = 0; t < NPL; t++) { for (k = 0; k < NPL; k++) { tmp1 = funFi1[t] * funFi1[k] / fmaxf(PSFa0, 1e-4f); FisherM[t*NPL + k] += tmp1; } } } for (k = 0; k < NPL*NPL; k++) ParamF[j*NPL*NPL + k] = FisherM[k]; }
94e31f1abccd74e05658bee5bee2d425dc41517a.cu
#include "cuda_runtime.h" #include "definitions.h" #include "kernel.h" #include "MatInvLib.h" #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void kernel_calCRLB(float *ParamF, float *ParamVar, int Nfit) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; int s, k; float FisherM[NPL*NPL]; float LowerBi[NPL*NPL]; float DiagLowerBi[NPL]; for (k = 0; k < NPL*NPL; k++) FisherM[k] = 0; for (k = 0; k < NPL*NPL; k++) { for (s = 0; s < NCH; s++) //Edited by FX FisherM[k] += ParamF[s*NPL*NPL*Nfit + j*NPL*NPL + k]; } kernel_MatInvN(FisherM, LowerBi, DiagLowerBi, NPL); for (k = 0; k < NPL; k++) ParamVar[j*NPL + k] = DiagLowerBi[k]; } __global__ void kernel_calFisherM(float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, float *gainR, float *ParamF, int Q, int Nfit, int PSFSize){ const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; int t, k, i, s; float PSFa0; float funFi1[NPL]; float tmp1; float FisherM[NPL*NPL]; float w; for (i = 0; i < NPL*NPL; i++) FisherM[i] = 0; for (i = 0; i < PSFSize; i++) { PSFa0 = PSF[j*PSFSize + i] * I[j] + bg[j] + gainR[j*PSFSize + i]; //x funFi1[0] = dPSFx[j*PSFSize + i] * I[j]; //y funFi1[1] = dPSFy[j*PSFSize + i] * I[j]; //z funFi1[2] = dPSFz[j*PSFSize + i] * I[j]; for (s = 0; s < NCH; s++) //edited by FX { w = (Q == s ? 1.0 : 0.0); // I funFi1[3 + s] = PSF[j*PSFSize + i] * w; // bg funFi1[3 + NCH + s] = w; } for (t = 0; t < NPL; t++) { for (k = 0; k < NPL; k++) { tmp1 = funFi1[t] * funFi1[k] / fmaxf(PSFa0, 1e-4f); FisherM[t*NPL + k] += tmp1; } } } for (k = 0; k < NPL*NPL; k++) ParamF[j*NPL*NPL + k] = FisherM[k]; }
2c2d05440035f129433cd59cd95320272dcfadb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <helper_math.h> #include <helper_functions.h> #include <helper_cuda.h> // CUDA device initialization helper functions #include "BilateralFilterCuda.hpp" /* Perform a simple bilateral filter. Bilateral filter is a nonlinear filter that is a mixture of range filter and domain filter, the previous one preserves crisp edges and the latter one filters noise. The intensity value at each pixel in an image is replaced by a weighted average of intensity values from nearby pixels. The weight factor is calculated by the product of domain filter component(using the gaussian distribution as a spatial distance) as well as range filter component(Euclidean distance between center pixel and the current neighbor pixel). Because this process is nonlinear, the sample just uses a simple pixel by pixel step. Texture fetches automatically clamp to edge of image. 1D gaussian array is mapped to a 1D texture instead of using shared memory, which may cause severe bank conflict. Threads are y-pass(column-pass), because the output is coalesced. Parameters od - pointer to output data in global memory d_f - pointer to the 1D gaussian array e_d - euclidean delta w - image width h - image height r - filter radius */ //// GLOBALS __constant__ float cGaussian[64]; //gaussian array in device side typedef texture<uchar4, 2, hipReadModeNormalizedFloat> TextureU4f; TextureU4f rgbaTex; typedef texture<uchar4, 2, hipReadModeNormalizedFloat> TextureU4f; TextureU4f guideTex; typedef texture<float, 2, hipReadModeElementType> Texture32FC1; Texture32FC1 depthTex_32FC1; typedef texture<ushort, 2, hipReadModeNormalizedFloat> Texture16UC1; Texture16UC1 depthTex_16UC1; // cost volume //texture<float, 3, hipReadModeNormalizedFloat> texCostVolume3D; // 3D texture //hipArray *d_volumeArray = 0; //// HELPERS // Euclidean Distance (x, y, d) = exp((|x - y| / d)^2 / 2) __device__ float euclideanLen(float4 a, float4 b, float d) { float mod = (b.x - a.x) * (b.x - a.x) + (b.y - a.y) * (b.y - a.y) + (b.z - a.z) * (b.z - a.z); return __expf(-mod / (2.f * d * d)); } __device__ float yangRangeDist( float4 a, float4 b, float d ) { float mod = ( fabs(b.x - a.x) + fabs(b.y - a.y) + fabs(b.z - a.z) ) / 3.f; return __expf(-mod / d); } __device__ float euclideanLen( float a, float b, float d ) { float diff = (b - a); return __expf( -(diff*diff) / (2.f * d * d) ); } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(fabs(rgba.x)); // clamp to [0.0, 1.0] rgba.y = __saturatef(fabs(rgba.y)); rgba.z = __saturatef(fabs(rgba.z)); rgba.w = __saturatef(fabs(rgba.w)); return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f); } __device__ float4 rgbaIntToFloat(uint c) { float4 rgba; rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f; rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f; rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f; rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f; return rgba; } //// PRECOMPUTATION /* Because a 2D gaussian mask is symmetry in row and column, here only 
generate a 1D mask, and use the product by row and column index later. 1D gaussian distribution : g(x, d) -- C * exp(-x^2/d^2), C is a constant amplifier parameters: og - output gaussian array in global memory delta - the 2nd parameter 'd' in the above function radius - half of the filter size (total filter size = 2 * radius + 1) */ extern "C" void updateGaussian( float delta, int radius ) { float fGaussian[64]; for ( int i = 0; i < 2*radius + 1; ++i ) { float x = i-radius; fGaussian[i] = expf(-(x*x) / (2*delta*delta)); // orig //fGaussian[i] = expf(-(x*x) / (2*delta*delta)); // Yang? } checkCudaErrors(hipMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1))); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// CrossBilateral 32FC1 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // NOTE: there is no point using <ushort> textures, // since that would normalise over 65536 and not 10001 /* * @brief Input type dependent texture read function * @param T Texture selector (depthTex_32FC1, or depthTex_16UC1) * @param x x coordinate to read from in texture * @param y y coordinate to read from in texture */ template <typename T> __device__ float fetchTexture( int x, int y ); template<> float fetchTexture<float>( int x, int y ) { return tex2D( depthTex_32FC1, x, y ); } template<> float fetchTexture<ushort>( int x, int y ) { return tex2D( depthTex_16UC1, x, y ); } /* * @brief Main crossfilter kernel * @param dOut normalised float output memory * @param w texture width * @param h texture height * @param outPitch elementcount of one row of dOut * @param costVolume w x (h * costVolumeZDim) read/write global array * @param costVolumePitch elementcount of one row of costVolume * @param costVolumeZDim depth of costVolume * @param e_d eucledian delta (range sigma) * @param r kernel half width */ template <typename T> __global__ void d_cross_bilateral_filterF( T *dOut, int w, int h, size_t outPitch, //float *costVolume, size_t costVolumePitch, uint costVolumeZDim, float e_d, int r, unsigned char fillMode = FILL_ALL | SKIP_ZEROS ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float t = 0.f; float4 guideCenter = tex2D( guideTex, x, y ); T centerPix = fetchTexture<T>( x, y ); // check for early exit if ( !(fillMode & FILL_ZEROS) && (centerPix != 0.f) ) // if 0, and NOT FILL_ZEROS { dOut[y * outPitch + x] = centerPix; return; } // estimate cost volume for ( int i = -r; i <= r; ++i ) { for ( int j = -r; j <= r; ++j ) { // read depth T curPix = fetchTexture<T>( x+j, y+i ); // skip, if no data if ( (fillMode & SKIP_ZEROS) && (curPix == 0.f) ) continue; // read rgb float4 guidePix = tex2D( guideTex, x + j, y + i ); // estimate weight factor = cGaussian[i + r] * cGaussian[j + r] * // spatial factor //expf( -sqrt(i*i+j*j) / e_d ) * yangRangeDist( guidePix, guideCenter, e_d ); // range factor // accumulate t += factor * curPix; sum += factor; } } if ( sum > 0.f ) dOut[y * outPitch + x] = t / sum; else dOut[y * outPitch + x] = centerPix; } /* * @brief Texture binding based on input template type (float tested only) * @param texRefPtr Cuda reference pointer to one of the globals at top of the file. 
*/ template <typename TImg> void prepareInputTex( textureReference const*& ); // <float> expects dImage to be normalised float template<> void prepareInputTex<float>( textureReference const*& texRefPtr ) { hipGetTextureReference( &texRefPtr, &depthTex_32FC1 ); } // <ushort> expects dImage to be 0..65536 template<> void prepareInputTex<ushort>( textureReference const*& texRefPtr ) { hipGetTextureReference( &texRefPtr, &depthTex_16UC1 ); } /* * @brief Cross biltareal filtering. Use <float> version, the others are untested. * @param dDest Device pointer, currently giving normalised floats * @param dImage Input pointer, currently expecting normalised floats * @param dTemp Copy buffer for dImage * @param pitch dImage and dTemp pitch, not used (since texturing) * @param dGuide uchar4 XRGB image (0..255) read as normalised float through "guideTex" * @param guidePitch dGuide pitch, not used, since texturing * @param width Width of every input, and output * @param height Height of every input and output * @param e_d Eucledian delta (range sigma) * @param radius Kernel half width * @param iterations Not tested to be other, than one * @param timer Performance timing */ template <typename T> double crossBilateralFilterF( T *dDest, uint destPitch, T *dImage, T *dTemp, uint imagePitch, uint *dGuide, uint guidePitch, //float *dCostVolume, uint costVolumePitch, hipExtent volumeSize, float e_d, int radius, int iterations , unsigned char fillMode, StopWatchInterface *timer ) { // var for kernel computation timing double dKernelTime; depthTex_16UC1.addressMode[0] = hipAddressModeMirror; depthTex_16UC1.addressMode[1] = hipAddressModeMirror; depthTex_32FC1.addressMode[0] = hipAddressModeMirror; depthTex_32FC1.addressMode[1] = hipAddressModeMirror; // bind input image texture textureReference const* texRefPtr; prepareInputTex<T>( texRefPtr ); // Bind inpput image to the texture hipChannelFormatDesc descT = hipCreateChannelDesc<T>(); size_t offset = 0; checkCudaErrors( hipBindTexture2D(&offset, texRefPtr, dImage, &descT, volumeSize.width, volumeSize.height, imagePitch) ); if ( offset > 0 ) std::cerr << "hipBindTexture2D returned non-zero offset!!!" << std::endl; // Bind guide texture hipChannelFormatDesc descU4 = hipCreateChannelDesc<uchar4>(); checkCudaErrors( hipBindTexture2D(&offset, guideTex, dGuide, descU4, volumeSize.width, volumeSize.height, guidePitch) ); if ( offset > 0 ) { std::cerr << "hipBindTexture2D returne non-zero offset!!!" << std::endl; } // work for ( int i = 0; i < iterations; ++i ) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((volumeSize.width + 16 - 1) / 16, (volumeSize.height + 16 - 1) / 16); dim3 blockSize(16, 16); hipLaunchKernelGGL(( d_cross_bilateral_filterF), dim3(gridSize), dim3(blockSize), 0, 0, dDest, volumeSize.width, volumeSize.height, destPitch / sizeof(T), //dCostVolume, costVolumePitch / sizeof(float), volumeSize.depth, e_d, radius, ( (fillMode == FILL_ALL_THEN_FILL_ZEROS) ? ( (i>0) ? (FILL_ZEROS | SKIP_ZEROS) : (FILL_ALL | SKIP_ZEROS) ) : fillMode ) ///* fillOnlyZeros: */ (fillMode == FILL_ALL) ? false : // ( fillMode == FILL_ONLY_ZEROS ? 
true : (i>0) ) ); // sync host and stop computation timer checkCudaErrors(hipDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(hipMemcpy2D(dTemp, imagePitch, dDest, sizeof(T) * volumeSize.width, sizeof(T) * volumeSize.width, volumeSize.height, hipMemcpyDeviceToDevice)); //checkCudaErrors(hipBindTexture2D(0, rgbaTex, dTemp, hipCreateChannelDesc<T>(), width, height, pitch)); checkCudaErrors( hipBindTexture2D(&offset, texRefPtr, dTemp, &descT, volumeSize.width, volumeSize.height, imagePitch) ); } } return ((dKernelTime/1000.)/(double)iterations); } /* * @brief Template specialisation declaration (needed by "extern" in BilateralFilteringCuda.hpp) */ template double crossBilateralFilterF( float *dDest, uint destPitch, float *dImage, float *dTemp, uint imagePitch, uint *dGuide, uint guidePitch, //float *dCostVolume, uint costVolumePitch, hipExtent volumeSize, float e_d, int radius, int iterations, unsigned char fillOnlyZeros, StopWatchInterface *timer ); #if 0 // Not implemented yet... template double crossBilateralFilterF( ushort *dDest, ushort *dImage, ushort *dTemp, uint pitch, unsigned *dGuide, unsigned guidePitch, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer ); #endif template <typename T> __global__ void d_cross_bilateral_filterCV( T *dOut, int w, int h, size_t outPitch, float *costVolume, size_t costVolumePitch, uint costVolumeZDim, float e_d, int r, bool onlyZeros = false ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float t = 0.f; float4 guideCenter = tex2D( guideTex, x, y ); T centerPix = fetchTexture<T>( x, y ); // check for early exit /*if ( onlyZeros && (centerPix != 0.f) ) { dOut[y * outPitch + x] = centerPix; return; }*/ // estimate cost volume for ( int z = 0; z < costVolumeZDim; ++z ) { for ( int i = -r; i <= r; ++i ) { for ( int j = -r; j <= r; ++j ) { // read depth T curPix = fetchTexture<T>( x+j, y+i ); // skip, if no data if ( curPix == 0.f ) continue; // read rgb float4 guidePix = tex2D( guideTex, x + j, y + i ); // estimate weight factor = cGaussian[i + r] * cGaussian[j + r] * // spatial factor euclideanLen( guidePix, guideCenter, e_d ); // range factor // accumulate t += factor * curPix; sum += factor; } } // images are continuosly stored below each other in costVolume costVolume[ (z * h + y) * costVolumePitch + x ] = t / sum; // old if ( z == costVolumeZDim / 2 ) dOut[y * outPitch + x] = t / sum; } // end for z } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //// /// Bilateral RGBA (8UC4) //// __global__ void d_bilateral_filterRGBA( uint *od, int w, int h, float e_d, int r ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float4 t = {0.f, 0.f, 0.f, 0.f}; float4 center = tex2D(rgbaTex, x, y); for (int i = -r; i <= r; ++i) { for (int j = -r; j <= r; ++j) { float4 curPix = tex2D(rgbaTex, x + j, y + i); if ( curPix.x == 0 ) continue; factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor 
euclideanLen(curPix, center, e_d); //range factor t += factor * curPix; sum += factor; } } od[y * w + x] = rgbaFloatToInt(t/sum); } /* Perform 2D bilateral filter on image using CUDA Parameters: d_dest - pointer to destination image in device memory width - image width height - image height e_d - euclidean delta radius - filter radius iterations - number of iterations */ extern "C" double bilateralFilterRGBA(uint *dDest, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer, uint* dImage, uint* dTemp, uint pitch ) { // var for kernel computation timing double dKernelTime; // Bind the array to the texture hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>(); size_t offset = 0; checkCudaErrors( hipBindTexture2D(&offset, rgbaTex, dImage, desc, width, height, pitch) ); if ( offset > 0 ) { std::cerr << "hipBindTexture2D returne non-zero offset!!!" << std::endl; } for (int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16); dim3 blockSize(16, 16); hipLaunchKernelGGL(( d_bilateral_filterRGBA), dim3(gridSize), dim3(blockSize), 0, 0, dDest, width, height, e_d, radius ); // sync host and stop computation timer checkCudaErrors(hipDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(hipMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width, sizeof(int)*width, height, hipMemcpyDeviceToDevice)); checkCudaErrors(hipBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch)); } } return ((dKernelTime/1000.)/(double)iterations); } //// /// Bilateral 32FC1 //// __global__ void d_bilateral_filterF( float *od, int w, int h, float e_d, int r ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float t = 0.f; float center = tex2D( depthTex_32FC1, x, y ); for ( int i = -r; i <= r; ++i ) { for ( int j = -r; j <= r; ++j ) { float curPix = tex2D(depthTex_32FC1, x + j, y + i); if ( curPix == 0.f ) // skip, if empty continue; factor = cGaussian[i + r] * cGaussian[j + r] * // domain factor euclideanLen(curPix, center, e_d); // range factor t += factor * curPix; sum += factor; } } // output od[y * w + x] = t / sum; } extern "C" double bilateralFilterF( float *dDest, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer, float* dImage, float* dTemp, uint pitch ) { // var for kernel computation timing double dKernelTime; // Bind the array to the texture hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); size_t offset = 0; checkCudaErrors( hipBindTexture2D(&offset, depthTex_32FC1, dImage, desc, width, height, pitch) ); if ( offset > 0 ) { std::cerr << "hipBindTexture2D returne non-zero offset!!!" 
<< std::endl; } for (int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16); dim3 blockSize(16, 16); hipLaunchKernelGGL(( d_bilateral_filterF), dim3(gridSize), dim3(blockSize), 0, 0, dDest, width, height, e_d, radius ); // sync host and stop computation timer checkCudaErrors(hipDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(hipMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width, sizeof(int)*width, height, hipMemcpyDeviceToDevice)); checkCudaErrors(hipBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch)); } } return ((dKernelTime/1000.)/(double)iterations); } //// /// CrossBilateral RGBA (8UC4) //// __global__ void d_cross_bilateral_filterRGBA( uint *od, int w, int h, float e_d, int r ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float4 t = {0.f, 0.f, 0.f, 0.f}; float4 center = tex2D(guideTex, x, y); for (int i = -r; i <= r; ++i) { for (int j = -r; j <= r; ++j) { float4 curPix = tex2D( rgbaTex, x + j, y + i ); float4 guidePix = tex2D( guideTex, x + j, y + i ); if ( curPix.x == 0 ) continue; factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor euclideanLen(guidePix, center, e_d); //range factor t += factor * curPix; sum += factor; } } od[y * w + x] = rgbaFloatToInt(t/sum); } extern "C" double crossBilateralFilterRGBA( uint *dDest, uint *dImage, uint *dTemp, uint pitch, uint *dGuide, uint guidePitch, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer ) { // var for kernel computation timing double dKernelTime; // Bind the array to the texture hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>(); size_t offset = 0; checkCudaErrors( hipBindTexture2D(&offset, rgbaTex, dImage, desc, width, height, pitch) ); if ( offset > 0 ) { std::cerr << "hipBindTexture2D returne non-zero offset!!!" << std::endl; } checkCudaErrors( hipBindTexture2D(&offset, guideTex, dGuide, desc, width, height, guidePitch) ); if ( offset > 0 ) { std::cerr << "hipBindTexture2D returne non-zero offset!!!" 
<< std::endl; } for (int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16); dim3 blockSize(16, 16); hipLaunchKernelGGL(( d_cross_bilateral_filterRGBA), dim3(gridSize), dim3(blockSize), 0, 0, dDest, width, height, e_d, radius ); // sync host and stop computation timer checkCudaErrors(hipDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(hipMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width, sizeof(int)*width, height, hipMemcpyDeviceToDevice)); checkCudaErrors(hipBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch)); } } return ((dKernelTime/1000.)/(double)iterations); } /* // Cost volume { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipExtent volumeSize = make_hipExtent( width, height, 9 ); checkCudaErrors( hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // set texture parameters texCostVolume3D.normalized = false; // access with normalized texture coordinates texCostVolume3D.filterMode = hipFilterModePoint; // linear interpolation texCostVolume3D.addressMode[0] = hipAddressModeBorder; // wrap texture coordinates texCostVolume3D.addressMode[1] = hipAddressModeBorder; texCostVolume3D.addressMode[2] = hipAddressModeBorder; // bind array to 3D texture checkCudaErrors( hipBindTextureToArray(texCostVolume3D, d_volumeArray, channelDesc) ); } */
2c2d05440035f129433cd59cd95320272dcfadb1.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <helper_math.h> #include <helper_functions.h> #include <helper_cuda.h> // CUDA device initialization helper functions #include "BilateralFilterCuda.hpp" /* Perform a simple bilateral filter. Bilateral filter is a nonlinear filter that is a mixture of range filter and domain filter, the previous one preserves crisp edges and the latter one filters noise. The intensity value at each pixel in an image is replaced by a weighted average of intensity values from nearby pixels. The weight factor is calculated by the product of domain filter component(using the gaussian distribution as a spatial distance) as well as range filter component(Euclidean distance between center pixel and the current neighbor pixel). Because this process is nonlinear, the sample just uses a simple pixel by pixel step. Texture fetches automatically clamp to edge of image. 1D gaussian array is mapped to a 1D texture instead of using shared memory, which may cause severe bank conflict. Threads are y-pass(column-pass), because the output is coalesced. Parameters od - pointer to output data in global memory d_f - pointer to the 1D gaussian array e_d - euclidean delta w - image width h - image height r - filter radius */ //// GLOBALS __constant__ float cGaussian[64]; //gaussian array in device side typedef texture<uchar4, 2, cudaReadModeNormalizedFloat> TextureU4f; TextureU4f rgbaTex; typedef texture<uchar4, 2, cudaReadModeNormalizedFloat> TextureU4f; TextureU4f guideTex; typedef texture<float, 2, cudaReadModeElementType> Texture32FC1; Texture32FC1 depthTex_32FC1; typedef texture<ushort, 2, cudaReadModeNormalizedFloat> Texture16UC1; Texture16UC1 depthTex_16UC1; // cost volume //texture<float, 3, cudaReadModeNormalizedFloat> texCostVolume3D; // 3D texture //cudaArray *d_volumeArray = 0; //// HELPERS // Euclidean Distance (x, y, d) = exp((|x - y| / d)^2 / 2) __device__ float euclideanLen(float4 a, float4 b, float d) { float mod = (b.x - a.x) * (b.x - a.x) + (b.y - a.y) * (b.y - a.y) + (b.z - a.z) * (b.z - a.z); return __expf(-mod / (2.f * d * d)); } __device__ float yangRangeDist( float4 a, float4 b, float d ) { float mod = ( fabs(b.x - a.x) + fabs(b.y - a.y) + fabs(b.z - a.z) ) / 3.f; return __expf(-mod / d); } __device__ float euclideanLen( float a, float b, float d ) { float diff = (b - a); return __expf( -(diff*diff) / (2.f * d * d) ); } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(fabs(rgba.x)); // clamp to [0.0, 1.0] rgba.y = __saturatef(fabs(rgba.y)); rgba.z = __saturatef(fabs(rgba.z)); rgba.w = __saturatef(fabs(rgba.w)); return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f); } __device__ float4 rgbaIntToFloat(uint c) { float4 rgba; rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f; rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f; rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f; rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f; return rgba; } //// PRECOMPUTATION /* Because a 2D gaussian mask is symmetry in row and column, here only generate a 1D mask, and use the product by row and column index later. 
1D gaussian distribution : g(x, d) -- C * exp(-x^2/d^2), C is a constant amplifier parameters: og - output gaussian array in global memory delta - the 2nd parameter 'd' in the above function radius - half of the filter size (total filter size = 2 * radius + 1) */ extern "C" void updateGaussian( float delta, int radius ) { float fGaussian[64]; for ( int i = 0; i < 2*radius + 1; ++i ) { float x = i-radius; fGaussian[i] = expf(-(x*x) / (2*delta*delta)); // orig //fGaussian[i] = expf(-(x*x) / (2*delta*delta)); // Yang? } checkCudaErrors(cudaMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1))); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// CrossBilateral 32FC1 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // NOTE: there is no point using <ushort> textures, // since that would normalise over 65536 and not 10001 /* * @brief Input type dependent texture read function * @param T Texture selector (depthTex_32FC1, or depthTex_16UC1) * @param x x coordinate to read from in texture * @param y y coordinate to read from in texture */ template <typename T> __device__ float fetchTexture( int x, int y ); template<> float fetchTexture<float>( int x, int y ) { return tex2D( depthTex_32FC1, x, y ); } template<> float fetchTexture<ushort>( int x, int y ) { return tex2D( depthTex_16UC1, x, y ); } /* * @brief Main crossfilter kernel * @param dOut normalised float output memory * @param w texture width * @param h texture height * @param outPitch elementcount of one row of dOut * @param costVolume w x (h * costVolumeZDim) read/write global array * @param costVolumePitch elementcount of one row of costVolume * @param costVolumeZDim depth of costVolume * @param e_d eucledian delta (range sigma) * @param r kernel half width */ template <typename T> __global__ void d_cross_bilateral_filterF( T *dOut, int w, int h, size_t outPitch, //float *costVolume, size_t costVolumePitch, uint costVolumeZDim, float e_d, int r, unsigned char fillMode = FILL_ALL | SKIP_ZEROS ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float t = 0.f; float4 guideCenter = tex2D( guideTex, x, y ); T centerPix = fetchTexture<T>( x, y ); // check for early exit if ( !(fillMode & FILL_ZEROS) && (centerPix != 0.f) ) // if 0, and NOT FILL_ZEROS { dOut[y * outPitch + x] = centerPix; return; } // estimate cost volume for ( int i = -r; i <= r; ++i ) { for ( int j = -r; j <= r; ++j ) { // read depth T curPix = fetchTexture<T>( x+j, y+i ); // skip, if no data if ( (fillMode & SKIP_ZEROS) && (curPix == 0.f) ) continue; // read rgb float4 guidePix = tex2D( guideTex, x + j, y + i ); // estimate weight factor = cGaussian[i + r] * cGaussian[j + r] * // spatial factor //expf( -sqrt(i*i+j*j) / e_d ) * yangRangeDist( guidePix, guideCenter, e_d ); // range factor // accumulate t += factor * curPix; sum += factor; } } if ( sum > 0.f ) dOut[y * outPitch + x] = t / sum; else dOut[y * outPitch + x] = centerPix; } /* * @brief Texture binding based on input template type (float tested only) * @param texRefPtr Cuda reference pointer to one of the globals at top of the file. 
*/ template <typename TImg> void prepareInputTex( textureReference const*& ); // <float> expects dImage to be normalised float template<> void prepareInputTex<float>( textureReference const*& texRefPtr ) { cudaGetTextureReference( &texRefPtr, &depthTex_32FC1 ); } // <ushort> expects dImage to be 0..65536 template<> void prepareInputTex<ushort>( textureReference const*& texRefPtr ) { cudaGetTextureReference( &texRefPtr, &depthTex_16UC1 ); } /* * @brief Cross biltareal filtering. Use <float> version, the others are untested. * @param dDest Device pointer, currently giving normalised floats * @param dImage Input pointer, currently expecting normalised floats * @param dTemp Copy buffer for dImage * @param pitch dImage and dTemp pitch, not used (since texturing) * @param dGuide uchar4 XRGB image (0..255) read as normalised float through "guideTex" * @param guidePitch dGuide pitch, not used, since texturing * @param width Width of every input, and output * @param height Height of every input and output * @param e_d Eucledian delta (range sigma) * @param radius Kernel half width * @param iterations Not tested to be other, than one * @param timer Performance timing */ template <typename T> double crossBilateralFilterF( T *dDest, uint destPitch, T *dImage, T *dTemp, uint imagePitch, uint *dGuide, uint guidePitch, //float *dCostVolume, uint costVolumePitch, cudaExtent volumeSize, float e_d, int radius, int iterations , unsigned char fillMode, StopWatchInterface *timer ) { // var for kernel computation timing double dKernelTime; depthTex_16UC1.addressMode[0] = cudaAddressModeMirror; depthTex_16UC1.addressMode[1] = cudaAddressModeMirror; depthTex_32FC1.addressMode[0] = cudaAddressModeMirror; depthTex_32FC1.addressMode[1] = cudaAddressModeMirror; // bind input image texture textureReference const* texRefPtr; prepareInputTex<T>( texRefPtr ); // Bind inpput image to the texture cudaChannelFormatDesc descT = cudaCreateChannelDesc<T>(); size_t offset = 0; checkCudaErrors( cudaBindTexture2D(&offset, texRefPtr, dImage, &descT, volumeSize.width, volumeSize.height, imagePitch) ); if ( offset > 0 ) std::cerr << "cudaBindTexture2D returned non-zero offset!!!" << std::endl; // Bind guide texture cudaChannelFormatDesc descU4 = cudaCreateChannelDesc<uchar4>(); checkCudaErrors( cudaBindTexture2D(&offset, guideTex, dGuide, descU4, volumeSize.width, volumeSize.height, guidePitch) ); if ( offset > 0 ) { std::cerr << "cudaBindTexture2D returne non-zero offset!!!" << std::endl; } // work for ( int i = 0; i < iterations; ++i ) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((volumeSize.width + 16 - 1) / 16, (volumeSize.height + 16 - 1) / 16); dim3 blockSize(16, 16); d_cross_bilateral_filterF<<< gridSize, blockSize>>>( dDest, volumeSize.width, volumeSize.height, destPitch / sizeof(T), //dCostVolume, costVolumePitch / sizeof(float), volumeSize.depth, e_d, radius, ( (fillMode == FILL_ALL_THEN_FILL_ZEROS) ? ( (i>0) ? (FILL_ZEROS | SKIP_ZEROS) : (FILL_ALL | SKIP_ZEROS) ) : fillMode ) ///* fillOnlyZeros: */ (fillMode == FILL_ALL) ? false : // ( fillMode == FILL_ONLY_ZEROS ? 
true : (i>0) ) ); // sync host and stop computation timer checkCudaErrors(cudaDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(cudaMemcpy2D(dTemp, imagePitch, dDest, sizeof(T) * volumeSize.width, sizeof(T) * volumeSize.width, volumeSize.height, cudaMemcpyDeviceToDevice)); //checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dTemp, cudaCreateChannelDesc<T>(), width, height, pitch)); checkCudaErrors( cudaBindTexture2D(&offset, texRefPtr, dTemp, &descT, volumeSize.width, volumeSize.height, imagePitch) ); } } return ((dKernelTime/1000.)/(double)iterations); } /* * @brief Template specialisation declaration (needed by "extern" in BilateralFilteringCuda.hpp) */ template double crossBilateralFilterF( float *dDest, uint destPitch, float *dImage, float *dTemp, uint imagePitch, uint *dGuide, uint guidePitch, //float *dCostVolume, uint costVolumePitch, cudaExtent volumeSize, float e_d, int radius, int iterations, unsigned char fillOnlyZeros, StopWatchInterface *timer ); #if 0 // Not implemented yet... template double crossBilateralFilterF( ushort *dDest, ushort *dImage, ushort *dTemp, uint pitch, unsigned *dGuide, unsigned guidePitch, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer ); #endif template <typename T> __global__ void d_cross_bilateral_filterCV( T *dOut, int w, int h, size_t outPitch, float *costVolume, size_t costVolumePitch, uint costVolumeZDim, float e_d, int r, bool onlyZeros = false ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float t = 0.f; float4 guideCenter = tex2D( guideTex, x, y ); T centerPix = fetchTexture<T>( x, y ); // check for early exit /*if ( onlyZeros && (centerPix != 0.f) ) { dOut[y * outPitch + x] = centerPix; return; }*/ // estimate cost volume for ( int z = 0; z < costVolumeZDim; ++z ) { for ( int i = -r; i <= r; ++i ) { for ( int j = -r; j <= r; ++j ) { // read depth T curPix = fetchTexture<T>( x+j, y+i ); // skip, if no data if ( curPix == 0.f ) continue; // read rgb float4 guidePix = tex2D( guideTex, x + j, y + i ); // estimate weight factor = cGaussian[i + r] * cGaussian[j + r] * // spatial factor euclideanLen( guidePix, guideCenter, e_d ); // range factor // accumulate t += factor * curPix; sum += factor; } } // images are continuosly stored below each other in costVolume costVolume[ (z * h + y) * costVolumePitch + x ] = t / sum; // old if ( z == costVolumeZDim / 2 ) dOut[y * outPitch + x] = t / sum; } // end for z } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //// /// Bilateral RGBA (8UC4) //// __global__ void d_bilateral_filterRGBA( uint *od, int w, int h, float e_d, int r ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float4 t = {0.f, 0.f, 0.f, 0.f}; float4 center = tex2D(rgbaTex, x, y); for (int i = -r; i <= r; ++i) { for (int j = -r; j <= r; ++j) { float4 curPix = tex2D(rgbaTex, x + j, y + i); if ( curPix.x == 0 ) continue; factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor 
euclideanLen(curPix, center, e_d); //range factor t += factor * curPix; sum += factor; } } od[y * w + x] = rgbaFloatToInt(t/sum); } /* Perform 2D bilateral filter on image using CUDA Parameters: d_dest - pointer to destination image in device memory width - image width height - image height e_d - euclidean delta radius - filter radius iterations - number of iterations */ extern "C" double bilateralFilterRGBA(uint *dDest, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer, uint* dImage, uint* dTemp, uint pitch ) { // var for kernel computation timing double dKernelTime; // Bind the array to the texture cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>(); size_t offset = 0; checkCudaErrors( cudaBindTexture2D(&offset, rgbaTex, dImage, desc, width, height, pitch) ); if ( offset > 0 ) { std::cerr << "cudaBindTexture2D returne non-zero offset!!!" << std::endl; } for (int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16); dim3 blockSize(16, 16); d_bilateral_filterRGBA<<< gridSize, blockSize>>>( dDest, width, height, e_d, radius ); // sync host and stop computation timer checkCudaErrors(cudaDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(cudaMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width, sizeof(int)*width, height, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch)); } } return ((dKernelTime/1000.)/(double)iterations); } //// /// Bilateral 32FC1 //// __global__ void d_bilateral_filterF( float *od, int w, int h, float e_d, int r ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float t = 0.f; float center = tex2D( depthTex_32FC1, x, y ); for ( int i = -r; i <= r; ++i ) { for ( int j = -r; j <= r; ++j ) { float curPix = tex2D(depthTex_32FC1, x + j, y + i); if ( curPix == 0.f ) // skip, if empty continue; factor = cGaussian[i + r] * cGaussian[j + r] * // domain factor euclideanLen(curPix, center, e_d); // range factor t += factor * curPix; sum += factor; } } // output od[y * w + x] = t / sum; } extern "C" double bilateralFilterF( float *dDest, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer, float* dImage, float* dTemp, uint pitch ) { // var for kernel computation timing double dKernelTime; // Bind the array to the texture cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); size_t offset = 0; checkCudaErrors( cudaBindTexture2D(&offset, depthTex_32FC1, dImage, desc, width, height, pitch) ); if ( offset > 0 ) { std::cerr << "cudaBindTexture2D returne non-zero offset!!!" 
<< std::endl; } for (int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16); dim3 blockSize(16, 16); d_bilateral_filterF<<< gridSize, blockSize>>>( dDest, width, height, e_d, radius ); // sync host and stop computation timer checkCudaErrors(cudaDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(cudaMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width, sizeof(int)*width, height, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch)); } } return ((dKernelTime/1000.)/(double)iterations); } //// /// CrossBilateral RGBA (8UC4) //// __global__ void d_cross_bilateral_filterRGBA( uint *od, int w, int h, float e_d, int r ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float4 t = {0.f, 0.f, 0.f, 0.f}; float4 center = tex2D(guideTex, x, y); for (int i = -r; i <= r; ++i) { for (int j = -r; j <= r; ++j) { float4 curPix = tex2D( rgbaTex, x + j, y + i ); float4 guidePix = tex2D( guideTex, x + j, y + i ); if ( curPix.x == 0 ) continue; factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor euclideanLen(guidePix, center, e_d); //range factor t += factor * curPix; sum += factor; } } od[y * w + x] = rgbaFloatToInt(t/sum); } extern "C" double crossBilateralFilterRGBA( uint *dDest, uint *dImage, uint *dTemp, uint pitch, uint *dGuide, uint guidePitch, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer ) { // var for kernel computation timing double dKernelTime; // Bind the array to the texture cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>(); size_t offset = 0; checkCudaErrors( cudaBindTexture2D(&offset, rgbaTex, dImage, desc, width, height, pitch) ); if ( offset > 0 ) { std::cerr << "cudaBindTexture2D returne non-zero offset!!!" << std::endl; } checkCudaErrors( cudaBindTexture2D(&offset, guideTex, dGuide, desc, width, height, guidePitch) ); if ( offset > 0 ) { std::cerr << "cudaBindTexture2D returne non-zero offset!!!" 
<< std::endl; } for (int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16); dim3 blockSize(16, 16); d_cross_bilateral_filterRGBA<<< gridSize, blockSize>>>( dDest, width, height, e_d, radius ); // sync host and stop computation timer checkCudaErrors(cudaDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(cudaMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width, sizeof(int)*width, height, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch)); } } return ((dKernelTime/1000.)/(double)iterations); } /* // Cost volume { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaExtent volumeSize = make_cudaExtent( width, height, 9 ); checkCudaErrors( cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // set texture parameters texCostVolume3D.normalized = false; // access with normalized texture coordinates texCostVolume3D.filterMode = cudaFilterModePoint; // linear interpolation texCostVolume3D.addressMode[0] = cudaAddressModeBorder; // wrap texture coordinates texCostVolume3D.addressMode[1] = cudaAddressModeBorder; texCostVolume3D.addressMode[2] = cudaAddressModeBorder; // bind array to 3D texture checkCudaErrors( cudaBindTextureToArray(texCostVolume3D, d_volumeArray, channelDesc) ); } */
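The 32FC1 path in this file is driven by two entry points: updateGaussian(), which fills the cGaussian constant-memory table, and bilateralFilterF(), which binds the pitched input to depthTex_32FC1 and launches d_bilateral_filterF. The caller below is a hedged usage sketch, assuming it lives in the same translation unit (or that the extern "C" prototypes are declared); the buffer layout, sigma values and the runBilateralF name are illustrative, not part of the original source.

// Hedged usage sketch, not part of the original source.
// Requires helper_cuda.h (checkCudaErrors) and helper_functions.h (sdk timers),
// which this file already includes.
void runBilateralF(const float *h_depth, float *h_out, int width, int height)
{
    const float gaussDelta = 4.0f;   // spatial sigma (assumed)
    const float e_d        = 0.1f;   // range sigma (assumed)
    const int   radius     = 5;      // kernel half width (assumed)
    float *d_img = NULL, *d_tmp = NULL, *d_dst = NULL;
    size_t pitch = 0;
    checkCudaErrors(cudaMallocPitch((void**)&d_img, &pitch, width * sizeof(float), height));
    checkCudaErrors(cudaMallocPitch((void**)&d_tmp, &pitch, width * sizeof(float), height));
    checkCudaErrors(cudaMalloc((void**)&d_dst, width * height * sizeof(float)));
    checkCudaErrors(cudaMemcpy2D(d_img, pitch, h_depth, width * sizeof(float),
                                 width * sizeof(float), height, cudaMemcpyHostToDevice));
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    updateGaussian(gaussDelta, radius);   // fill the cGaussian constant-memory table
    bilateralFilterF(d_dst, width, height, e_d, radius, /*iterations=*/1,
                     timer, d_img, d_tmp, (uint)pitch);
    // d_bilateral_filterF writes a tightly packed width*height output.
    checkCudaErrors(cudaMemcpy(h_out, d_dst, width * height * sizeof(float),
                               cudaMemcpyDeviceToHost));
    sdkDeleteTimer(&timer);
    cudaFree(d_img); cudaFree(d_tmp); cudaFree(d_dst);
}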
46c1e831aa764dc483fb50750287cb728eb0d1db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * calcXsphWS.cu * * Created on: 16-12-2013 * Author: Kamil Szewc ([email protected]) */ #include "../../sph.h" #include "../../hlp.h" #include "../../methods/kernels.cuh" #include "../../methods/interactions.cuh" __device__ static real3 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par) { real q = sqrt(pow2(dpos.x) + pow2(dpos.y)) * par->I_H; if (q < 2.0) { real g = kern(q, par->I_H); switch (par->T_XSPH) { case 1: return MAKE_REAL3(p[j].m*p[j].vel.x*g / p[j].d, p[j].m*p[j].vel.y*g / p[j].d, g*p[j].m / p[j].d); case 2: if (p[i].c == p[j].c) { return MAKE_REAL3(p[j].m*p[j].vel.x*g / p[j].d, p[j].m*p[j].vel.y*g / p[j].d, g*p[j].m / p[j].d); } else { return MAKE_REAL3(0.0, 0.0, 0.0); } case 3: return MAKE_REAL3(p[j].m*p[j].vel.x*g, p[j].m*p[j].vel.y*g, g*p[j].m); default: return MAKE_REAL3(0.0, 0.0, 0.0); } } else { return MAKE_REAL3(0.0, 0.0, 0.0); } } __global__ void calcXsphWS(Particle *p, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Parameters *par) { uint index = threadIdx.x + blockIdx.x*blockDim.x; if (index < par->N) { register real3 result = MAKE_REAL3(0.0, 0.0, 0.0); #include "../../methods/interactions/interactionsPositiveOnWallNoSlip.cuh" p[index].rh_pos.x = result.x / result.z; p[index].rh_pos.y = result.y / result.z; } }
46c1e831aa764dc483fb50750287cb728eb0d1db.cu
/* * calcXsphWS.cu * * Created on: 16-12-2013 * Author: Kamil Szewc ([email protected]) */ #include "../../sph.h" #include "../../hlp.h" #include "../../methods/kernels.cuh" #include "../../methods/interactions.cuh" __device__ static real3 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par) { real q = sqrt(pow2(dpos.x) + pow2(dpos.y)) * par->I_H; if (q < 2.0) { real g = kern(q, par->I_H); switch (par->T_XSPH) { case 1: return MAKE_REAL3(p[j].m*p[j].vel.x*g / p[j].d, p[j].m*p[j].vel.y*g / p[j].d, g*p[j].m / p[j].d); case 2: if (p[i].c == p[j].c) { return MAKE_REAL3(p[j].m*p[j].vel.x*g / p[j].d, p[j].m*p[j].vel.y*g / p[j].d, g*p[j].m / p[j].d); } else { return MAKE_REAL3(0.0, 0.0, 0.0); } case 3: return MAKE_REAL3(p[j].m*p[j].vel.x*g, p[j].m*p[j].vel.y*g, g*p[j].m); default: return MAKE_REAL3(0.0, 0.0, 0.0); } } else { return MAKE_REAL3(0.0, 0.0, 0.0); } } __global__ void calcXsphWS(Particle *p, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Parameters *par) { uint index = threadIdx.x + blockIdx.x*blockDim.x; if (index < par->N) { register real3 result = MAKE_REAL3(0.0, 0.0, 0.0); #include "../../methods/interactions/interactionsPositiveOnWallNoSlip.cuh" p[index].rh_pos.x = result.x / result.z; p[index].rh_pos.y = result.y / result.z; } }
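For each neighbour j with q = |dpos|/h < 2, the interaction() above adds m_j u_j W(q)/rho_j to result.x/.y and m_j W(q)/rho_j to result.z (T_XSPH case 3 drops the 1/rho_j factor, case 2 restricts the sum to neighbours of the same phase c). The ratio written into rh_pos is therefore a Shepard-normalised smoothed velocity, which in the usual XSPH notation reads

\hat{\mathbf{u}}_i = \frac{\sum_j (m_j/\rho_j)\,\mathbf{u}_j\,W(q_{ij})}{\sum_j (m_j/\rho_j)\,W(q_{ij})}.

How rh_pos is then applied to the particle positions is handled elsewhere in this solver and is not shown in this file.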
406c18d42252580db5667f59c02d8442385b2900.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <assert.h> __global__ void cuda_vector_add(int *a, int *b) { __shared__ int results[64]; // Actually we don't need this, just for illustration int global_thread_id = blockIdx.x * blockDim.x + threadIdx.x; int local_thread_id = threadIdx.x; results[local_thread_id] = a[global_thread_id] + b[global_thread_id]; __syncthreads(); a[global_thread_id] = results[local_thread_id]; } int main(int argc, char **argv) { int n, nBytes; n = atoi(argv[1]); n = (n + 63) / 64 * 64; nBytes = sizeof(int) * n; printf("Vector add, length = %d\n", n); int *h_a, *h_b, *d_a, *d_b; // Allocate memory on host h_a = (int*) malloc(nBytes); h_b = (int*) malloc(nBytes); // Allocate memory on device hipMalloc((void**) &d_a, nBytes); hipMalloc((void**) &d_b, nBytes); // Init data on host for (int i = 0; i < n; i++) { h_a[i] = 114 + i; h_b[i] = 514 - i; } // Copy data to device hipMemcpy(d_a, h_a, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, nBytes, hipMemcpyHostToDevice); // Set kernel arguments and launch kernel dim3 block(64); dim3 grid(n / block.x); hipLaunchKernelGGL(( cuda_vector_add), dim3(grid), dim3(block), 0, 0, d_a, d_b); // Generate result on host for (int i = 0; i < n; i++) h_b[i] += h_a[i]; // Copy result from device to host hipMemcpy(h_a, d_a, nBytes, hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Check the results for (int i = 0; i < n; i++) assert(h_a[i] == h_b[i]); printf("Result is correct.\n"); // Free host memory free(h_a); free(h_b); // Free device memory hipFree(d_a); hipFree(d_b); return 0; }
406c18d42252580db5667f59c02d8442385b2900.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <assert.h> __global__ void cuda_vector_add(int *a, int *b) { __shared__ int results[64]; // Actually we don't need this, just for illustration int global_thread_id = blockIdx.x * blockDim.x + threadIdx.x; int local_thread_id = threadIdx.x; results[local_thread_id] = a[global_thread_id] + b[global_thread_id]; __syncthreads(); a[global_thread_id] = results[local_thread_id]; } int main(int argc, char **argv) { int n, nBytes; n = atoi(argv[1]); n = (n + 63) / 64 * 64; nBytes = sizeof(int) * n; printf("Vector add, length = %d\n", n); int *h_a, *h_b, *d_a, *d_b; // Allocate memory on host h_a = (int*) malloc(nBytes); h_b = (int*) malloc(nBytes); // Allocate memory on device cudaMalloc((void**) &d_a, nBytes); cudaMalloc((void**) &d_b, nBytes); // Init data on host for (int i = 0; i < n; i++) { h_a[i] = 114 + i; h_b[i] = 514 - i; } // Copy data to device cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, nBytes, cudaMemcpyHostToDevice); // Set kernel arguments and launch kernel dim3 block(64); dim3 grid(n / block.x); cuda_vector_add<<<grid, block>>>(d_a, d_b); // Generate result on host for (int i = 0; i < n; i++) h_b[i] += h_a[i]; // Copy result from device to host cudaMemcpy(h_a, d_a, nBytes, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // Check the results for (int i = 0; i < n; i++) assert(h_a[i] == h_b[i]); printf("Result is correct.\n"); // Free host memory free(h_a); free(h_b); // Free device memory cudaFree(d_a); cudaFree(d_b); return 0; }
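Because main() rounds n up to the next multiple of 64 before allocating, the launch above uses exactly n/64 blocks of 64 threads and the kernel can omit a bounds check. A minimal sketch, assuming one wanted to add error reporting around the existing launch (the original file performs no error checking; the messages below are illustrative only):

// Illustrative sketch: query launch and execution errors around the existing call.
cuda_vector_add<<<grid, block>>>(d_a, d_b);
cudaError_t err = cudaGetLastError();   // reports an invalid launch configuration
if (err != cudaSuccess) fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
err = cudaDeviceSynchronize();          // reports errors raised while the kernel ran
if (err != cudaSuccess) fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));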
e766b60f0895fdf55f6ffbfeca27f5ad409b65fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/scalar_logical_kernels.h" #include "oneflow/user/kernels/elementwise_xpu_kernel.cuh" namespace oneflow { template<template<typename T> class BIN_OP, typename T> __global__ void DoCUDAScalarLogical(const int64_t elem_cnt, const T scalar, const T* in, int8_t* out) { DoScalarLogical<BIN_OP, T>(elem_cnt, scalar, in, out); } template<template<typename T> class BIN_OP, typename T> struct ScalarLogicalFunctor<DeviceType::kGPU, BIN_OP, T> final { void operator()(ep::Stream* stream, const int64_t elem_cnt, const T scalar, const T* in, int8_t* out) { RUN_CUDA_KERNEL((DoCUDAScalarLogical<BIN_OP, T>), stream, BlocksNum4ThreadsNum(elem_cnt), elem_cnt, scalar, in, out); } }; INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncEQ); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncNE); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncGT); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncGE); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncLT); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncLE); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncOR); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncXOR); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncAND); } // namespace oneflow
e766b60f0895fdf55f6ffbfeca27f5ad409b65fe.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/scalar_logical_kernels.h" #include "oneflow/user/kernels/elementwise_xpu_kernel.cuh" namespace oneflow { template<template<typename T> class BIN_OP, typename T> __global__ void DoCUDAScalarLogical(const int64_t elem_cnt, const T scalar, const T* in, int8_t* out) { DoScalarLogical<BIN_OP, T>(elem_cnt, scalar, in, out); } template<template<typename T> class BIN_OP, typename T> struct ScalarLogicalFunctor<DeviceType::kGPU, BIN_OP, T> final { void operator()(ep::Stream* stream, const int64_t elem_cnt, const T scalar, const T* in, int8_t* out) { RUN_CUDA_KERNEL((DoCUDAScalarLogical<BIN_OP, T>), stream, BlocksNum4ThreadsNum(elem_cnt), elem_cnt, scalar, in, out); } }; INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncEQ); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncNE); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncGT); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncGE); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncLT); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncLE); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncOR); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncXOR); INSTANTIATE_SCALAR_LOGICAL_FUNCTORS(DeviceType::kGPU, BinaryFuncAND); } // namespace oneflow
38e28de61b19b90aa0b07157f29472c121e607d0.hip
// !!! This is a file automatically generated by hipify!!! #include "open_acc_map_header.cuh" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" extern __device__ Vec4Simple<int> cuda_device_1(int *dev_a, int *dev_b); template <class T> class Vec4Simple { public: //T val[4] __attribute__((aligned(32))); T val[4]; Vec4Simple() { } // Replicate scalar x across v. Vec4Simple(T x) { for(unsigned int i=0;i<4;i++) val[i]=x; } // Replicate 4 values across v. Vec4Simple(T a,T b,T c,T d) { val[0]=a; val[1]=b; val[2]=c; val[3]=d; } // Copy vector v. Vec4Simple(Vec4Simple const &x) { for(unsigned int i=0;i<4;i++) val[i]=x.val[i]; } // Member function to load from array (unaligned) Vec4Simple & load(T const * p) { for(unsigned int i=0;i<4;i++) val[i]=p[i]; return *this; } // Member function to load from array, aligned by 32 Vec4Simple & load_a(T const * p) { return this->load(p); } Vec4Simple & insert(int i,T const &x) { val[i]=x; return *this; } // Member function to store into array (unaligned) void store(T * p) const { for(unsigned int i=0;i<4;i++) p[i]=val[i]; } // Member function to store into array, aligned by 32 void store_a(T * p) const { this->store(p); } /* Vec4Simple & operator = (Vec4Simple const & r) { for(unsigned int i=0;i<4;i++) val[i]=r.val[i]; return *this; } */ __device__ __host__ Vec4Simple & operator = (Vec4Simple const & r) { for(unsigned int i=0;i<4;i++) val[i]=r.val[i]; return *this; } T operator [](int i) const { return val[i]; } Vec4Simple operator++ (int) { Vec4Simple<T> temp (*this); for(unsigned int i=0;i<4;i++) val[i]++; return temp; } __device__ Vec4Simple<int> cuda_device_1(int *dev_a, int *dev_b) { Vec4Simple<T> temp (*this); int i = threadIdx.x; if (i < 4) { val[i] = dev_a[i] + dev_b[i]; } return temp; } };
38e28de61b19b90aa0b07157f29472c121e607d0.cu
#include "open_acc_map_header.cuh" #include "device_launch_parameters.h" #include "cuda.h" #include "cuda_runtime.h" extern __device__ Vec4Simple<int> cuda_device_1(int *dev_a, int *dev_b); template <class T> class Vec4Simple { public: //T val[4] __attribute__((aligned(32))); T val[4]; Vec4Simple() { } // Replicate scalar x across v. Vec4Simple(T x) { for(unsigned int i=0;i<4;i++) val[i]=x; } // Replicate 4 values across v. Vec4Simple(T a,T b,T c,T d) { val[0]=a; val[1]=b; val[2]=c; val[3]=d; } // Copy vector v. Vec4Simple(Vec4Simple const &x) { for(unsigned int i=0;i<4;i++) val[i]=x.val[i]; } // Member function to load from array (unaligned) Vec4Simple & load(T const * p) { for(unsigned int i=0;i<4;i++) val[i]=p[i]; return *this; } // Member function to load from array, aligned by 32 Vec4Simple & load_a(T const * p) { return this->load(p); } Vec4Simple & insert(int i,T const &x) { val[i]=x; return *this; } // Member function to store into array (unaligned) void store(T * p) const { for(unsigned int i=0;i<4;i++) p[i]=val[i]; } // Member function to store into array, aligned by 32 void store_a(T * p) const { this->store(p); } /* Vec4Simple & operator = (Vec4Simple const & r) { for(unsigned int i=0;i<4;i++) val[i]=r.val[i]; return *this; } */ __device__ __host__ Vec4Simple & operator = (Vec4Simple const & r) { for(unsigned int i=0;i<4;i++) val[i]=r.val[i]; return *this; } T operator [](int i) const { return val[i]; } Vec4Simple operator++ (int) { Vec4Simple<T> temp (*this); for(unsigned int i=0;i<4;i++) val[i]++; return temp; } __device__ Vec4Simple<int> cuda_device_1(int *dev_a, int *dev_b) { Vec4Simple<T> temp (*this); int i = threadIdx.x; if (i < 4) { val[i] = dev_a[i] + dev_b[i]; } return temp; } };
9674e6fa83e67709c87b63e033ec2f7e4584ab44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // nnet0/nnet-kernels.cu // Copyright 2015 Johns Hopkins University (author: Daniel Povey) // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. #include <cfloat> #include "nnet0/nnet-kernels-ansi.h" #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 200 #error - Kaldi no longer supports CC1.x devices. Please use a newer GPU or \ configure with --use-cuda=no (this will disable the use of GPU). #endif #define ATOMIC_CONST 32 #define CU_BLOCK_DIM 1024 template <typename Real> __host__ __device__ inline float log_plus(Real a, Real b) { if (a == -float(INFINITY)) return b; if (b == -float(INFINITY)) return a; float m = a > b ? a : b; return log1pf(expf(-fabs(a - b))) + m; } template <typename Real> __device__ float atomic_log_plus(Real *addr_f, Real value) { int *addr = (int*)addr_f; float expected = *addr_f; float sum = log_plus(expected, value); int old_value = atomicCAS(addr, __float_as_int(expected), __float_as_int(sum)); while (old_value != __float_as_int(expected)) { expected = __int_as_float(old_value); sum = log_plus(expected, value); old_value = atomicCAS(addr, __float_as_int(expected), __float_as_int(sum)); } return __int_as_float(old_value); } // <<<batch_size, CU_BLOCK_CONST>>> template <typename Real> __global__ static void alpha_first_kernel(Real *alpha, const int alpha_size, const int batch_size, const int T, const Real * const start_weight) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; for (int idx = tid; idx < alpha_size; idx += blockDim.x) { alpha[mini_batch_idx * alpha_size * (T+1) + idx] = start_weight[idx]; } } __global__ static void alpha_kernel(float *alpha, const float* const logits, const int batch_size, const int T, const int t, const int * const input_lengths, const int alpha_size, const int logits_size, const IntPair * const alpha_transition_index, const Transition * const alpha_transition, bool batch_first) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; if (t > input_lengths[mini_batch_idx]) return; int idx1 = mini_batch_idx * alpha_size * (T+1) + alpha_size * t; int idx2 = mini_batch_idx * alpha_size * (T+1) + alpha_size * (t-1); int idx3 = 0; if (batch_first) idx3 = mini_batch_idx * logits_size * T + logits_size * (t-1); else idx3 = batch_size * logits_size * (t-1) + mini_batch_idx * logits_size; for (int idx = tid; idx < alpha_size; idx += blockDim.x) { int start = alpha_transition_index[idx].first; int end = alpha_transition_index[idx].second; float result = -float(INFINITY); for (int k = start; k <= end; k++) { result = log_plus(alpha[idx2+alpha_transition[k].state] + alpha_transition[k].weight + logits[idx3+alpha_transition[k].label], result); } alpha[idx1+idx] = result; } } __global__ static void alpha_last_kernel(float *alpha, const int alpha_size, const int batch_size, const int T, const int * const input_lengths, const float * const end_weight) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; 
int alpha_start = mini_batch_idx * alpha_size * (T+1); int cT = input_lengths[mini_batch_idx]; for (int idx = tid; idx < alpha_size; idx += blockDim.x) { alpha[alpha_start+cT*alpha_size+idx] += end_weight[idx]; } } // <<< minibatch, N = 32,64,128...>>> __global__ static void alpha_lld_kernal(const float * const alpha, const int alpha_size, const int T, const int * const input_lengths, float * loglikelihood) { int mini_batch_idx = blockIdx.x; int idx = threadIdx.x; int block_dim = blockDim.x; int cT = input_lengths[mini_batch_idx]; int last_idx = alpha_size * (T+1) * mini_batch_idx + cT*alpha_size; // printf("enter alpha_lld_kernal, block.x: %d, thread.x: %d\n", blockIdx.x, threadIdx.x); extern __shared__ float sdata[]; float temp = -float(INFINITY); for (int i = idx; i < alpha_size; i += block_dim) { temp = log_plus(temp, alpha[last_idx+i]); } sdata[idx] = temp; __syncthreads(); for (int shift = block_dim / 2; shift > warpSize; shift >>= 1) { if (idx < shift) { sdata[idx] = log_plus(sdata[idx], sdata[idx+shift]); } __syncthreads(); } if (idx < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) { sdata[idx] = log_plus(sdata[idx], sdata[idx+shift]); } } __syncthreads(); if (idx == 0) { loglikelihood[mini_batch_idx] = sdata[0]; // printf("alpha loglikelihod: %f mini_batch %d\n", loglikelihood[mini_batch_idx], mini_batch_idx); } } template <typename Real> __global__ static void beta_last_kernel(Real *beta, const int beta_size, const int batch_size, const int * const input_lengths, const Real * const end_weight) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; int cT = input_lengths[mini_batch_idx]; for (int idx = tid; idx < beta_size; idx += blockDim.x) { beta[mini_batch_idx * 2 * beta_size + (cT % 2) * beta_size + idx] = end_weight[idx]; } } template <typename Real> __global__ void beta_first_kernel(Real *beta, const int beta_size, const int batch_size, const Real * const start_weight) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; for (int idx = tid; idx < beta_size; idx += blockDim.x) { beta[mini_batch_idx * 2 * beta_size + idx] += start_weight[idx]; } } template <typename Real> __global__ static void beta_kernel(Real *beta, const Real* const alpha, const Real* const logits, Real *grad_storage, const int batch_size, const int T, const int t, const int *input_lengths, const int beta_size, const int logits_size, const IntPair * const beta_transition_index, const Transition * const beta_transition, const bool batch_first) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; if (t >= input_lengths[mini_batch_idx]) return; int idx1 = mini_batch_idx * beta_size * (T+1) + beta_size * t; int idx2 = mini_batch_idx * beta_size * 2 + beta_size * ((t+1) % 2); int idx3 = mini_batch_idx * beta_size * 2 + beta_size * (t % 2); int idx4 = 0; if (batch_first) idx4 = mini_batch_idx * logits_size * T + logits_size * t; else idx4 = batch_size * logits_size * t + mini_batch_idx * logits_size; int idx5 = mini_batch_idx * logits_size * ATOMIC_CONST; for (int idx = tid; idx < beta_size; idx += blockDim.x) { int start = beta_transition_index[idx].first; int end = beta_transition_index[idx].second; float beta_result = -float(INFINITY); float temp_value = -float(INFINITY); for (int k = start; k <= end; k++) { temp_value = beta[idx2+beta_transition[k].state] + beta_transition[k].weight + logits[idx4+beta_transition[k].label]; beta_result = log_plus(temp_value, beta_result); float partial_grad = alpha[idx1+idx] + temp_value; float *grad_position = grad_storage + idx5 + 
beta_transition[k].label * ATOMIC_CONST + threadIdx.x % ATOMIC_CONST; atomic_log_plus(grad_position, partial_grad); } beta[idx3+idx] = beta_result; } } template <typename Real> __global__ static void copy_grad(Real *grad_storage, Real *grad_net, const Real * const alpha_lld, const int * const input_lengths, const int batch_size, const int logits_size, const int T, const int t, const bool batch_first) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; if (t >= input_lengths[mini_batch_idx]) return; int idx1 = 0; if (batch_first) idx1 = mini_batch_idx * logits_size * T + logits_size * t; else idx1 = batch_size * logits_size * t + mini_batch_idx * logits_size; float lld = alpha_lld[mini_batch_idx]; for (int idx = tid; idx < logits_size; idx += blockDim.x) { float *grad_position = grad_net + idx1 + idx; int idx_storage = mini_batch_idx*logits_size*ATOMIC_CONST+idx*ATOMIC_CONST; float grad = -float(INFINITY); for (int i = 0; i < ATOMIC_CONST; i++) { grad = log_plus(grad_storage[idx_storage+i], grad); grad_storage[idx_storage+i] = -float(INFINITY); } *grad_position = expf(grad - lld); } } template <typename Real> __global__ static void beta_lld_kernal(const Real * const beta, const int beta_size, Real * loglikelihood) { int idx = threadIdx.x; int first_idx = beta_size * 2 * idx; loglikelihood[idx] = beta[first_idx]; } void cuda_compute_alpha(dim3 Gr, dim3 Bl, BaseFloat *alpha, const BaseFloat *logits, const int batch_size, int T, const int alpha_size, int logits_size, int *input_lengths, BaseFloat *loglikelihood, const BaseFloat *start_weight, const BaseFloat *end_weight, const IntPair *transition_index_alpha, const Transition *transition_alpha, hipStream_t stream, const bool batch_first) { int alpha_lld_dim = 128; hipLaunchKernelGGL(( alpha_first_kernel), dim3(Gr), dim3(Bl), 0, stream, alpha, alpha_size, batch_size, T, start_weight); for (int t = 1; t <= T; t++) { hipLaunchKernelGGL(( alpha_kernel), dim3(Gr), dim3(Bl), 0, stream, alpha, logits, batch_size, T, t, input_lengths, alpha_size, logits_size, transition_index_alpha, transition_alpha, batch_first); } hipLaunchKernelGGL(( alpha_last_kernel), dim3(Gr), dim3(Bl), 0, stream, alpha, alpha_size, batch_size, T, input_lengths, end_weight); hipLaunchKernelGGL(( alpha_lld_kernal), dim3(Gr), dim3(alpha_lld_dim), sizeof(float)*alpha_lld_dim, stream, alpha, alpha_size, T, input_lengths, loglikelihood); // hipDeviceSynchronize(); } void cuda_compute_beta_and_grad(dim3 Gr, dim3 Bl, BaseFloat *beta, const BaseFloat * alpha, const BaseFloat * logits, const BaseFloat * alpha_lld, BaseFloat *grad_storage, BaseFloat *grad_net, const int batch_size, const int T, const int beta_size, const int logits_size, const int * input_lengths, BaseFloat * loglikelihood, const BaseFloat *start_weight, const BaseFloat *end_weight, const IntPair *transition_index_beta, const Transition *transition_beta, hipStream_t stream, const bool batch_first) { // set grad_storage hipLaunchKernelGGL(( copy_grad), dim3(Gr), dim3(Bl), 0, stream, grad_storage, grad_net, alpha_lld, input_lengths, batch_size, logits_size, T, 0, batch_first); hipLaunchKernelGGL(( beta_last_kernel), dim3(Gr), dim3(Bl), 0, stream, beta, beta_size, batch_size, input_lengths, end_weight); for (int t = T-1; t >= 0; t--) { hipLaunchKernelGGL(( beta_kernel), dim3(Gr), dim3(Bl), 0, stream, beta, alpha, logits, grad_storage, batch_size, T, t, input_lengths, beta_size, logits_size, transition_index_beta, transition_beta, batch_first); hipLaunchKernelGGL(( copy_grad), dim3(Gr), dim3(Bl), 0, stream, 
grad_storage, grad_net, alpha_lld, input_lengths, batch_size, logits_size, T, t, batch_first); } hipLaunchKernelGGL(( beta_first_kernel), dim3(Gr), dim3(Bl), 0, stream, beta, beta_size, batch_size, start_weight); hipLaunchKernelGGL(( beta_lld_kernal), dim3(1), dim3(Gr), 0, 0, beta, beta_size, loglikelihood); }
9674e6fa83e67709c87b63e033ec2f7e4584ab44.cu
// nnet0/nnet-kernels.cu // Copyright 2015 Johns Hopkins University (author: Daniel Povey) // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. #include <cfloat> #include "nnet0/nnet-kernels-ansi.h" #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 200 #error - Kaldi no longer supports CC1.x devices. Please use a newer GPU or \ configure with --use-cuda=no (this will disable the use of GPU). #endif #define ATOMIC_CONST 32 #define CU_BLOCK_DIM 1024 template <typename Real> __host__ __device__ inline float log_plus(Real a, Real b) { if (a == -float(INFINITY)) return b; if (b == -float(INFINITY)) return a; float m = a > b ? a : b; return log1pf(expf(-fabs(a - b))) + m; } template <typename Real> __device__ float atomic_log_plus(Real *addr_f, Real value) { int *addr = (int*)addr_f; float expected = *addr_f; float sum = log_plus(expected, value); int old_value = atomicCAS(addr, __float_as_int(expected), __float_as_int(sum)); while (old_value != __float_as_int(expected)) { expected = __int_as_float(old_value); sum = log_plus(expected, value); old_value = atomicCAS(addr, __float_as_int(expected), __float_as_int(sum)); } return __int_as_float(old_value); } // <<<batch_size, CU_BLOCK_CONST>>> template <typename Real> __global__ static void alpha_first_kernel(Real *alpha, const int alpha_size, const int batch_size, const int T, const Real * const start_weight) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; for (int idx = tid; idx < alpha_size; idx += blockDim.x) { alpha[mini_batch_idx * alpha_size * (T+1) + idx] = start_weight[idx]; } } __global__ static void alpha_kernel(float *alpha, const float* const logits, const int batch_size, const int T, const int t, const int * const input_lengths, const int alpha_size, const int logits_size, const IntPair * const alpha_transition_index, const Transition * const alpha_transition, bool batch_first) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; if (t > input_lengths[mini_batch_idx]) return; int idx1 = mini_batch_idx * alpha_size * (T+1) + alpha_size * t; int idx2 = mini_batch_idx * alpha_size * (T+1) + alpha_size * (t-1); int idx3 = 0; if (batch_first) idx3 = mini_batch_idx * logits_size * T + logits_size * (t-1); else idx3 = batch_size * logits_size * (t-1) + mini_batch_idx * logits_size; for (int idx = tid; idx < alpha_size; idx += blockDim.x) { int start = alpha_transition_index[idx].first; int end = alpha_transition_index[idx].second; float result = -float(INFINITY); for (int k = start; k <= end; k++) { result = log_plus(alpha[idx2+alpha_transition[k].state] + alpha_transition[k].weight + logits[idx3+alpha_transition[k].label], result); } alpha[idx1+idx] = result; } } __global__ static void alpha_last_kernel(float *alpha, const int alpha_size, const int batch_size, const int T, const int * const input_lengths, const float * const end_weight) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; int alpha_start = mini_batch_idx * alpha_size * (T+1); int cT = 
input_lengths[mini_batch_idx]; for (int idx = tid; idx < alpha_size; idx += blockDim.x) { alpha[alpha_start+cT*alpha_size+idx] += end_weight[idx]; } } // <<< minibatch, N = 32,64,128...>>> __global__ static void alpha_lld_kernal(const float * const alpha, const int alpha_size, const int T, const int * const input_lengths, float * loglikelihood) { int mini_batch_idx = blockIdx.x; int idx = threadIdx.x; int block_dim = blockDim.x; int cT = input_lengths[mini_batch_idx]; int last_idx = alpha_size * (T+1) * mini_batch_idx + cT*alpha_size; // printf("enter alpha_lld_kernal, block.x: %d, thread.x: %d\n", blockIdx.x, threadIdx.x); extern __shared__ float sdata[]; float temp = -float(INFINITY); for (int i = idx; i < alpha_size; i += block_dim) { temp = log_plus(temp, alpha[last_idx+i]); } sdata[idx] = temp; __syncthreads(); for (int shift = block_dim / 2; shift > warpSize; shift >>= 1) { if (idx < shift) { sdata[idx] = log_plus(sdata[idx], sdata[idx+shift]); } __syncthreads(); } if (idx < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) { sdata[idx] = log_plus(sdata[idx], sdata[idx+shift]); } } __syncthreads(); if (idx == 0) { loglikelihood[mini_batch_idx] = sdata[0]; // printf("alpha loglikelihod: %f mini_batch %d\n", loglikelihood[mini_batch_idx], mini_batch_idx); } } template <typename Real> __global__ static void beta_last_kernel(Real *beta, const int beta_size, const int batch_size, const int * const input_lengths, const Real * const end_weight) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; int cT = input_lengths[mini_batch_idx]; for (int idx = tid; idx < beta_size; idx += blockDim.x) { beta[mini_batch_idx * 2 * beta_size + (cT % 2) * beta_size + idx] = end_weight[idx]; } } template <typename Real> __global__ void beta_first_kernel(Real *beta, const int beta_size, const int batch_size, const Real * const start_weight) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; for (int idx = tid; idx < beta_size; idx += blockDim.x) { beta[mini_batch_idx * 2 * beta_size + idx] += start_weight[idx]; } } template <typename Real> __global__ static void beta_kernel(Real *beta, const Real* const alpha, const Real* const logits, Real *grad_storage, const int batch_size, const int T, const int t, const int *input_lengths, const int beta_size, const int logits_size, const IntPair * const beta_transition_index, const Transition * const beta_transition, const bool batch_first) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; if (t >= input_lengths[mini_batch_idx]) return; int idx1 = mini_batch_idx * beta_size * (T+1) + beta_size * t; int idx2 = mini_batch_idx * beta_size * 2 + beta_size * ((t+1) % 2); int idx3 = mini_batch_idx * beta_size * 2 + beta_size * (t % 2); int idx4 = 0; if (batch_first) idx4 = mini_batch_idx * logits_size * T + logits_size * t; else idx4 = batch_size * logits_size * t + mini_batch_idx * logits_size; int idx5 = mini_batch_idx * logits_size * ATOMIC_CONST; for (int idx = tid; idx < beta_size; idx += blockDim.x) { int start = beta_transition_index[idx].first; int end = beta_transition_index[idx].second; float beta_result = -float(INFINITY); float temp_value = -float(INFINITY); for (int k = start; k <= end; k++) { temp_value = beta[idx2+beta_transition[k].state] + beta_transition[k].weight + logits[idx4+beta_transition[k].label]; beta_result = log_plus(temp_value, beta_result); float partial_grad = alpha[idx1+idx] + temp_value; float *grad_position = grad_storage + idx5 + beta_transition[k].label * ATOMIC_CONST + threadIdx.x % ATOMIC_CONST; 
atomic_log_plus(grad_position, partial_grad); } beta[idx3+idx] = beta_result; } } template <typename Real> __global__ static void copy_grad(Real *grad_storage, Real *grad_net, const Real * const alpha_lld, const int * const input_lengths, const int batch_size, const int logits_size, const int T, const int t, const bool batch_first) { int mini_batch_idx = blockIdx.x; int tid = threadIdx.x; if (t >= input_lengths[mini_batch_idx]) return; int idx1 = 0; if (batch_first) idx1 = mini_batch_idx * logits_size * T + logits_size * t; else idx1 = batch_size * logits_size * t + mini_batch_idx * logits_size; float lld = alpha_lld[mini_batch_idx]; for (int idx = tid; idx < logits_size; idx += blockDim.x) { float *grad_position = grad_net + idx1 + idx; int idx_storage = mini_batch_idx*logits_size*ATOMIC_CONST+idx*ATOMIC_CONST; float grad = -float(INFINITY); for (int i = 0; i < ATOMIC_CONST; i++) { grad = log_plus(grad_storage[idx_storage+i], grad); grad_storage[idx_storage+i] = -float(INFINITY); } *grad_position = expf(grad - lld); } } template <typename Real> __global__ static void beta_lld_kernal(const Real * const beta, const int beta_size, Real * loglikelihood) { int idx = threadIdx.x; int first_idx = beta_size * 2 * idx; loglikelihood[idx] = beta[first_idx]; } void cuda_compute_alpha(dim3 Gr, dim3 Bl, BaseFloat *alpha, const BaseFloat *logits, const int batch_size, int T, const int alpha_size, int logits_size, int *input_lengths, BaseFloat *loglikelihood, const BaseFloat *start_weight, const BaseFloat *end_weight, const IntPair *transition_index_alpha, const Transition *transition_alpha, cudaStream_t stream, const bool batch_first) { int alpha_lld_dim = 128; alpha_first_kernel<<<Gr, Bl, 0, stream>>>(alpha, alpha_size, batch_size, T, start_weight); for (int t = 1; t <= T; t++) { alpha_kernel<<<Gr, Bl, 0, stream>>>(alpha, logits, batch_size, T, t, input_lengths, alpha_size, logits_size, transition_index_alpha, transition_alpha, batch_first); } alpha_last_kernel<<<Gr, Bl, 0, stream>>>(alpha, alpha_size, batch_size, T, input_lengths, end_weight); alpha_lld_kernal<<<Gr, alpha_lld_dim, sizeof(float)*alpha_lld_dim, stream>>>(alpha, alpha_size, T, input_lengths, loglikelihood); // cudaDeviceSynchronize(); } void cuda_compute_beta_and_grad(dim3 Gr, dim3 Bl, BaseFloat *beta, const BaseFloat * alpha, const BaseFloat * logits, const BaseFloat * alpha_lld, BaseFloat *grad_storage, BaseFloat *grad_net, const int batch_size, const int T, const int beta_size, const int logits_size, const int * input_lengths, BaseFloat * loglikelihood, const BaseFloat *start_weight, const BaseFloat *end_weight, const IntPair *transition_index_beta, const Transition *transition_beta, cudaStream_t stream, const bool batch_first) { // set grad_storage copy_grad<<<Gr, Bl, 0, stream>>>(grad_storage, grad_net, alpha_lld, input_lengths, batch_size, logits_size, T, 0, batch_first); beta_last_kernel<<<Gr, Bl, 0, stream>>>(beta, beta_size, batch_size, input_lengths, end_weight); for (int t = T-1; t >= 0; t--) { beta_kernel<<<Gr, Bl, 0, stream>>>(beta, alpha, logits, grad_storage, batch_size, T, t, input_lengths, beta_size, logits_size, transition_index_beta, transition_beta, batch_first); copy_grad<<<Gr, Bl, 0, stream>>>(grad_storage, grad_net, alpha_lld, input_lengths, batch_size, logits_size, T, t, batch_first); } beta_first_kernel<<<Gr, Bl, 0, stream>>>(beta, beta_size, batch_size, start_weight); beta_lld_kernal<<<1, Gr>>>(beta, beta_size, loglikelihood); }
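log_plus above is the numerically stable two-term log-sum-exp, log(e^a + e^b) = max(a,b) + log1p(e^{-|a-b|}), with -INFINITY acting as the identity element, and atomic_log_plus retries that update through atomicCAS until no other thread has changed the slot in between. A small host-side sketch (not part of the original file) of why the stable form matters for the log-domain alpha/beta sums:

// Illustrative sketch: compare the stable form with the naive one on the host.
float a = -1000.5f, b = -1001.2f;
float stable = fmaxf(a, b) + log1pf(expf(-fabsf(a - b)));  // ~ -1000.1, what log_plus returns
float naive  = logf(expf(a) + expf(b));                    // expf underflows to 0, so this is -inf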
9e60e399356d93d0a4b24fc1382a3c76574afad6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "human.cuh" #include <cutil.h> #include <stdio.h> #include <iostream> #include <fstream> #include "../../math/random.cuh" #include "../../observer/observer.cuh" #include "../../agent/Turtle.cuh" #include "seed.cuh" /* Each agent's ID is its index in the array arguments. */ using namespace std; __constant__ int D_SEED = SEED; /* Initialize the agent data */ human::human(int N, int minX, int maxX, int minY, int maxY){//N is the number of agents int* dev_agentX = NULL; int* dev_agentY = NULL; int* dev_agentInfect = NULL; int* dev_agentInfectionTime = NULL; add_human(N); int *x = get_pointer_x(0); int *y = get_pointer_y(0); int *infect_P = get_pointer_infect(0); int *infection_time_P = get_pointer_infection_time(0); //infect = NULL; //die = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&dev_agentX, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_agentY, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_agentInfect, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_agentInfectionTime, sizeof(int)*N)); dim3 blocks(1024, 1, 1); dim3 grids((N+1023)/1024, 1, 1); hipLaunchKernelGGL(( initiate_agent), dim3(grids), dim3(blocks), 0, 0, dev_agentX, dev_agentY, dev_agentInfect, dev_agentInfectionTime, N, minX, maxX, minY, maxY); CUDA_SAFE_CALL(hipMemcpy(x, dev_agentX, sizeof(int)*N, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(y, dev_agentY, sizeof(int)*N, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(infect_P, dev_agentInfect, sizeof(int)*N, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(infection_time_P, dev_agentInfectionTime, sizeof(int)*N, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(dev_agentX)); CUDA_SAFE_CALL(hipFree(dev_agentY)); CUDA_SAFE_CALL(hipFree(dev_agentInfect)); CUDA_SAFE_CALL(hipFree(dev_agentInfectionTime)); } human::~human(){ } /* int human::getX(int id){ return x[id]; } int human::getY(int id){ return y[id]; } */ int human::getInfect(int id){ return infect[id]; } int human::getInfectionTime(int id){ return infection_time[id]; } /* int* human::get_pointer_x(){ return x; } int* human::get_pointer_y(){ return y; } */ int* human::get_pointer_infect(int id){ return &infect[id]; } int* human::get_pointer_infection_time(int id){ return &infection_time[id]; } void human::add_human(int N){ add_turtle(N); for (int i=0; i<N; ++i){ infect.push_back(0); infection_time.push_back(0); } } /*reserve is not usable here because iterators cannot be obtained*/ void human::die(int id){ remove_turtle(id); if ((infect.empty() == false) && (infection_time.empty() == false)){ int search = 0; vector<int>::iterator it_infect = infect.begin(); vector<int>::iterator it_infection_time = infection_time.begin(); while (search != id){ ++it_infect; ++it_infection_time; ++search; } infect.erase(it_infect); infection_time.erase(it_infection_time); } } __global__ void agent_infect(int infect_distance, int* world, int* devX, int* devY, int* infect, int N, int minX, int maxX, int minY, int maxY){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //const int infect_parameter = 80; if (tid < N){ if(infect[tid] != 1){ if (world[devX[tid]+((maxX-minX+1)*devY[tid])] >= 1){ infect[tid] = 1; } /*The commented-out block below checks the surrounding cells while guarding against out-of-range access*/ /* int pos = devX[tid] + ((maxX-minX+1)*devY[tid]); int count = check_with_distance(world, infect_distance, pos, minX, maxX, minY, maxY); */ /*End of the surrounding-cell check*/ /* if (count > 0){ int parameter = (Rand() % 101) + (10*count); if (parameter > infect_parameter){//the infection condition may be tweaked as desired infect[tid] = 1; } } else{ int parameter = Rand() % 101; if (parameter > infect_parameter){ infect[tid] = 1; } } */ } } } __global__ void update_field_before_move(int* world, int* infect, int* x, int* y, int N, int minX, int
maxX){ int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < N){ if (infect[tid] == 1){ if (world[(y[tid]*(maxX-minX+1))+x[tid]] > 0){ atomicSub(&world[(y[tid]*(maxX-minX+1))+x[tid]], 1); } } } } __global__ void update_field_after_move(int* world, int* infect, int* x, int* y, int N, int minX, int maxX){ int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < N){ if (infect[tid] == 1){ atomicAdd(&world[(y[tid]*(maxX-minX+1))+x[tid]], 1); } } } __global__ void update_infection_time(int* dev_infect, int* dev_infection_time, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < N){ if (dev_infect[tid] == 1){ ++dev_infection_time[tid]; } } } void human::step(zombie& zombies, int* field, int minX, int maxX, int minY, int maxY, int& N, int& zombieN){ int field_count = (maxX-minX+1)*(maxY-minY+1); int *tmp_field = (int*)malloc(sizeof(int)*field_count);//before info dim3 grids((N+1023)/1024,1,1); dim3 blocks(1024,1,1); int *devX, *devY; int *dev_infect, *dev_infection_time; int *dev_field_before;//, *dev_field_after; int *x = get_pointer_x(0); int *y = get_pointer_y(0); int *infect = get_pointer_infect(0); int *infection_time = get_pointer_infection_time(0); for (int i=(int)minY;i<(int)maxY;++i){ tmp_field[i] = field[i]; } /*update infection time*/ CUDA_SAFE_CALL(hipMalloc((void**)&dev_infection_time, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_infect, sizeof(int)*N)); CUDA_SAFE_CALL(hipMemcpy(dev_infect, infect, sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(dev_infection_time, infection_time, sizeof(int)*N, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( update_infection_time), dim3(grids), dim3(blocks), 0, 0, dev_infect, dev_infection_time, N); CUDA_SAFE_CALL(hipMemcpy(infection_time, dev_infection_time, sizeof(int)*N, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(dev_infection_time)); CUDA_SAFE_CALL(hipFree(dev_infect)); /*update field before move*/ /* CUDA_SAFE_CALL(hipMalloc((void**)&dev_field_after, sizeof(int)*field_count)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_infect, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&devX, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&devY, sizeof(int)*N)); CUDA_SAFE_CALL(hipMemcpy(dev_field_after, field, sizeof(int)*field_count, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(dev_infect, infect, sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(devX, x, sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(devY, y, sizeof(int)*N, hipMemcpyHostToDevice)); update_field_before_move<<<grids, blocks>>>(dev_field_after, dev_infect, devX, devY, N, minX, maxX); CUDA_SAFE_CALL(hipMemcpy(field, dev_field_after, sizeof(int)*field_count, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(devX)); CUDA_SAFE_CALL(hipFree(devY)); CUDA_SAFE_CALL(hipFree(dev_field_after)); CUDA_SAFE_CALL(hipFree(dev_infect)); */ /*move*/ CUDA_SAFE_CALL(hipMalloc((void**)&devX, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&devY, sizeof(int)*N)); CUDA_SAFE_CALL(hipMemcpy(devX, x, sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(devY, y, sizeof(int)*N, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( move), dim3(grids),dim3(blocks), 0, 0, 2,devX,devY,N,minX,maxX,minY,maxY); CUDA_SAFE_CALL(hipMemcpy(x, devX, sizeof(int)*N, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(y, devY, sizeof(int)*N, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(devX)); CUDA_SAFE_CALL(hipFree(devY)); /*update field after move*/ /* CUDA_SAFE_CALL(hipMalloc((void**)&dev_field_after, sizeof(int)*field_count)); 
CUDA_SAFE_CALL(hipMalloc((void**)&dev_infect, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&devX, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&devY, sizeof(int)*N)); CUDA_SAFE_CALL(hipMemcpy(dev_field_after, field, sizeof(int)*field_count, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(dev_infect, infect, sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(devX, x, sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(devY, y, sizeof(int)*N, hipMemcpyHostToDevice)); update_field_after_move<<<grids, blocks>>>(dev_field_after, dev_infect, devX, devY, N, minX, maxX); CUDA_SAFE_CALL(hipMemcpy(field, dev_field_after, sizeof(int)*field_count, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(devX)); CUDA_SAFE_CALL(hipFree(devY)); CUDA_SAFE_CALL(hipFree(dev_field_after)); CUDA_SAFE_CALL(hipFree(dev_infect)); */ /*infect*/ CUDA_SAFE_CALL(hipMalloc((void**)&devX, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&devY, sizeof(int)*N)); CUDA_SAFE_CALL(hipMemcpy(devX, x, sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(devY, y, sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_infect, sizeof(int)*N)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_field_before, sizeof(int)*field_count)); CUDA_SAFE_CALL(hipMemcpy(dev_infect, &infect[0], sizeof(int)*N, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(dev_field_before, tmp_field, sizeof(int)*field_count, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( agent_infect), dim3(grids), dim3(blocks), 0, 0, 1, dev_field_before, devX, devY, dev_infect, N, minX, maxX, minY, maxY); CUDA_SAFE_CALL(hipMemcpy(infect, dev_infect, sizeof(int)*N, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(tmp_field, dev_field_before, sizeof(int)*field_count, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(dev_infect)); CUDA_SAFE_CALL(hipFree(dev_field_before)); CUDA_SAFE_CALL(hipFree(devX)); CUDA_SAFE_CALL(hipFree(devY)); free(tmp_field); for (int id=0; id<N; ++id){ if (infection_time[id] >= 5){ zombies.add_zombie(getX(id), getY(id), minX, maxX, minY, maxY, field); //cout << "X:" << getX(id) << ", Y:" << getY(id) << endl; ++zombieN; die(id); --N; --id; //id = 0; } } } __global__ void initiate_agent(int* x, int* y, int* infect, int* infection_time, int N, int minX, int maxX, int minY, int maxY){ int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < N){ //x[tid] = (Rand() << tid) % (maxX-minX+1);//% (maxX+1); //y[tid] = (Rand() >> tid) % (maxY-minY+1);//% (maxY+1); x[tid] = abs(Xorshift128(tid+D_SEED,N-tid+D_SEED) % (maxX-minX+1)); y[tid] = abs(Xorshift128(N-tid+D_SEED,tid+D_SEED) % (maxY-minY+1)); infect[tid] = 0; infection_time[tid] = 0; } } void human::output_human_info(int N){ int infect_count = 0; ofstream ofs("info_human.csv"); ofs << "id,x,y,infected,infection time" << endl; for (int i=0; i<N; ++i){ if (getInfect(i) == 1){ ++infect_count; ofs << i << "," << getX(i) << "," << getY(i) << "," << "true" << "," << infection_time[i] << endl;//<< getInfectionTime(i) << endl; } else{ ofs << i << "," << getX(i) << "," << getY(i) << "," << "false" << "," << getInfectionTime(i) << endl; } } ofs << endl; ofs << "Total infected:," << infect_count << " people" << endl; }
9e60e399356d93d0a4b24fc1382a3c76574afad6.cu
#include "human.cuh" #include <cutil.h> #include <stdio.h> #include <iostream> #include <fstream> #include "../../math/random.cuh" #include "../../observer/observer.cuh" #include "../../agent/Turtle.cuh" #include "seed.cuh" /* エージェント個人が持つIDは配列引数の番号とする。 */ using namespace std; __constant__ int D_SEED = SEED; /* エージェント情報を初期化する */ human::human(int N, int minX, int maxX, int minY, int maxY){//Nはエージェント数 int* dev_agentX = NULL; int* dev_agentY = NULL; int* dev_agentInfect = NULL; int* dev_agentInfectionTime = NULL; add_human(N); int *x = get_pointer_x(0); int *y = get_pointer_y(0); int *infect_P = get_pointer_infect(0); int *infection_time_P = get_pointer_infection_time(0); //infect = NULL; //die = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&dev_agentX, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_agentY, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_agentInfect, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_agentInfectionTime, sizeof(int)*N)); dim3 blocks(1024, 1, 1); dim3 grids((N+1023)/1024, 1, 1); initiate_agent<<<grids, blocks>>>(dev_agentX, dev_agentY, dev_agentInfect, dev_agentInfectionTime, N, minX, maxX, minY, maxY); CUDA_SAFE_CALL(cudaMemcpy(x, dev_agentX, sizeof(int)*N, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(y, dev_agentY, sizeof(int)*N, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(infect_P, dev_agentInfect, sizeof(int)*N, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(infection_time_P, dev_agentInfectionTime, sizeof(int)*N, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(dev_agentX)); CUDA_SAFE_CALL(cudaFree(dev_agentY)); CUDA_SAFE_CALL(cudaFree(dev_agentInfect)); CUDA_SAFE_CALL(cudaFree(dev_agentInfectionTime)); } human::~human(){ } /* int human::getX(int id){ return x[id]; } int human::getY(int id){ return y[id]; } */ int human::getInfect(int id){ return infect[id]; } int human::getInfectionTime(int id){ return infection_time[id]; } /* int* human::get_pointer_x(){ return x; } int* human::get_pointer_y(){ return y; } */ int* human::get_pointer_infect(int id){ return &infect[id]; } int* human::get_pointer_infection_time(int id){ return &infection_time[id]; } void human::add_human(int N){ add_turtle(N); for (int i=0; i<N; ++i){ infect.push_back(0); infection_time.push_back(0); } } /*reserveはイテレータが取得できないので不可*/ void human::die(int id){ remove_turtle(id); if ((infect.empty() == false) && (infection_time.empty() == false)){ int search = 0; vector<int>::iterator it_infect = infect.begin(); vector<int>::iterator it_infection_time = infection_time.begin(); while (search != id){ ++it_infect; ++it_infection_time; ++search; } infect.erase(it_infect); infection_time.erase(it_infection_time); } } __global__ void agent_infect(int infect_distance, int* world, int* devX, int* devY, int* infect, int N, int minX, int maxX, int minY, int maxY){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //const int infect_parameter = 80; if (tid < N){ if(infect[tid] != 1){ if (world[devX[tid]+((maxX-minX+1)*devY[tid])] >= 1){ infect[tid] = 1; } /*以下は範囲外アクセスを考慮した周囲のチェック*/ /* int pos = devX[tid] + ((maxX-minX+1)*devY[tid]); int count = check_with_distance(world, infect_distance, pos, minX, maxX, minY, maxY); */ /*周囲チェックの処理はここまで*/ /* if (count > 0){ int parameter = (Rand() % 101) + (10*count); if (parameter > infect_parameter){//感染条件は適当にいじってよし infect[tid] = 1; } } else{ int parameter = Rand() % 101; if (parameter > infect_parameter){ infect[tid] = 1; } } */ } } } __global__ void update_field_before_move(int* world, int* infect, int* x, int* y, int N, int minX, 
int maxX){ int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < N){ if (infect[tid] == 1){ if (world[(y[tid]*(maxX-minX+1))+x[tid]] > 0){ atomicSub(&world[(y[tid]*(maxX-minX+1))+x[tid]], 1); } } } } __global__ void update_field_after_move(int* world, int* infect, int* x, int* y, int N, int minX, int maxX){ int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < N){ if (infect[tid] == 1){ atomicAdd(&world[(y[tid]*(maxX-minX+1))+x[tid]], 1); } } } __global__ void update_infection_time(int* dev_infect, int* dev_infection_time, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < N){ if (dev_infect[tid] == 1){ ++dev_infection_time[tid]; } } } void human::step(zombie& zombies, int* field, int minX, int maxX, int minY, int maxY, int& N, int& zombieN){ int field_count = (maxX-minX+1)*(maxY-minY+1); int *tmp_field = (int*)malloc(sizeof(int)*field_count);//before info dim3 grids((N+1023)/1024,1,1); dim3 blocks(1024,1,1); int *devX, *devY; int *dev_infect, *dev_infection_time; int *dev_field_before;//, *dev_field_after; int *x = get_pointer_x(0); int *y = get_pointer_y(0); int *infect = get_pointer_infect(0); int *infection_time = get_pointer_infection_time(0); for (int i=(int)minY;i<(int)maxY;++i){ tmp_field[i] = field[i]; } /*update infection time*/ CUDA_SAFE_CALL(cudaMalloc((void**)&dev_infection_time, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_infect, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMemcpy(dev_infect, infect, sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(dev_infection_time, infection_time, sizeof(int)*N, cudaMemcpyHostToDevice)); update_infection_time<<<grids, blocks>>>(dev_infect, dev_infection_time, N); CUDA_SAFE_CALL(cudaMemcpy(infection_time, dev_infection_time, sizeof(int)*N, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(dev_infection_time)); CUDA_SAFE_CALL(cudaFree(dev_infect)); /*update field before move*/ /* CUDA_SAFE_CALL(cudaMalloc((void**)&dev_field_after, sizeof(int)*field_count)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_infect, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&devX, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&devY, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMemcpy(dev_field_after, field, sizeof(int)*field_count, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(dev_infect, infect, sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(devX, x, sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(devY, y, sizeof(int)*N, cudaMemcpyHostToDevice)); update_field_before_move<<<grids, blocks>>>(dev_field_after, dev_infect, devX, devY, N, minX, maxX); CUDA_SAFE_CALL(cudaMemcpy(field, dev_field_after, sizeof(int)*field_count, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(devX)); CUDA_SAFE_CALL(cudaFree(devY)); CUDA_SAFE_CALL(cudaFree(dev_field_after)); CUDA_SAFE_CALL(cudaFree(dev_infect)); */ /*move*/ CUDA_SAFE_CALL(cudaMalloc((void**)&devX, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&devY, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMemcpy(devX, x, sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(devY, y, sizeof(int)*N, cudaMemcpyHostToDevice)); move<<<grids,blocks>>>(2,devX,devY,N,minX,maxX,minY,maxY); CUDA_SAFE_CALL(cudaMemcpy(x, devX, sizeof(int)*N, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(y, devY, sizeof(int)*N, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(devX)); CUDA_SAFE_CALL(cudaFree(devY)); /*update field after move*/ /* CUDA_SAFE_CALL(cudaMalloc((void**)&dev_field_after, sizeof(int)*field_count)); 
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_infect, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&devX, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&devY, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMemcpy(dev_field_after, field, sizeof(int)*field_count, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(dev_infect, infect, sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(devX, x, sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(devY, y, sizeof(int)*N, cudaMemcpyHostToDevice)); update_field_after_move<<<grids, blocks>>>(dev_field_after, dev_infect, devX, devY, N, minX, maxX); CUDA_SAFE_CALL(cudaMemcpy(field, dev_field_after, sizeof(int)*field_count, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(devX)); CUDA_SAFE_CALL(cudaFree(devY)); CUDA_SAFE_CALL(cudaFree(dev_field_after)); CUDA_SAFE_CALL(cudaFree(dev_infect)); */ /*infect*/ CUDA_SAFE_CALL(cudaMalloc((void**)&devX, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&devY, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMemcpy(devX, x, sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(devY, y, sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_infect, sizeof(int)*N)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_field_before, sizeof(int)*field_count)); CUDA_SAFE_CALL(cudaMemcpy(dev_infect, &infect[0], sizeof(int)*N, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(dev_field_before, tmp_field, sizeof(int)*field_count, cudaMemcpyHostToDevice)); agent_infect<<<grids, blocks>>>(1, dev_field_before, devX, devY, dev_infect, N, minX, maxX, minY, maxY); CUDA_SAFE_CALL(cudaMemcpy(infect, dev_infect, sizeof(int)*N, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(tmp_field, dev_field_before, sizeof(int)*field_count, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(dev_infect)); CUDA_SAFE_CALL(cudaFree(dev_field_before)); CUDA_SAFE_CALL(cudaFree(devX)); CUDA_SAFE_CALL(cudaFree(devY)); free(tmp_field); for (int id=0; id<N; ++id){ if (infection_time[id] >= 5){ zombies.add_zombie(getX(id), getY(id), minX, maxX, minY, maxY, field); //cout << "X:" << getX(id) << ", Y:" << getY(id) << endl; ++zombieN; die(id); --N; --id; //id = 0; } } } __global__ void initiate_agent(int* x, int* y, int* infect, int* infection_time, int N, int minX, int maxX, int minY, int maxY){ int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < N){ //x[tid] = (Rand() << tid) % (maxX-minX+1);//% (maxX+1); //y[tid] = (Rand() >> tid) % (maxY-minY+1);//% (maxY+1); x[tid] = abs(Xorshift128(tid+D_SEED,N-tid+D_SEED) % (maxX-minX+1)); y[tid] = abs(Xorshift128(N-tid+D_SEED,tid+D_SEED) % (maxY-minY+1)); infect[tid] = 0; infection_time[tid] = 0; } } void human::output_human_info(int N){ int infect_count = 0; ofstream ofs("info_human.csv"); ofs << "id,x,y,infected,infection time" << endl; for (int i=0; i<N; ++i){ if (getInfect(i) == 1){ ++infect_count; ofs << i << "," << getX(i) << "," << getY(i) << "," << "true" << "," << infection_time[i] << endl;//<< getInfectionTime(i) << endl; } else{ ofs << i << "," << getX(i) << "," << getY(i) << "," << "false" << "," << getInfectionTime(i) << endl; } } ofs << endl; ofs << "Total infected:," << infect_count << " people" << endl; }
b0cb8eb0185425da5ae47abae68094795a5afe6f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> // includes cuda.h and hip/hip_runtime_api.h #include "spray_k.cuh" #include<helper_cuda.h> #include<helper_math.h> #include "utility.h" #include "tables.h" __constant__ FlipConstant dparam; __constant__ LBMConstant LBM_dparam; __constant__ int NX; __constant__ int NY; __constant__ int NZ; __constant__ int NXMC; __constant__ int NYMC; __constant__ int NZMC; texture<uint, 1, hipReadModeElementType> edgeTex; texture<uint, 1, hipReadModeElementType> triTex; texture<uint, 1, hipReadModeElementType> numVertsTex; __device__ float racc = 0.; __device__ float wacc = 0.; __device__ float3 pacc; __device__ float sradiusInv; __constant__ float v_max = (float)0.816496580927726; //!< set maximum velocity to sqrt(2/3), such that f_eq[0] >= 0 __host__ __device__ inline int getidx(int i, int j, int k) { return (i*NZ*NY + j*NZ + k); } inline __host__ __device__ int dot(int3 a,float3 b) { return a.x * b.x + a.y * b.y + a.z*b.z; } inline __host__ __device__ int dot(float3 a, int3 b) { return a.x * b.x + a.y * b.y + a.z*b.z; } //**********************************LBM***************************************** __host__ __device__ float LBMfeq(float3 u, float omega, float rho, int3 vel_i ,float RT0)//vel_i==e[qm][3] { float feq; float3 vel; vel.x = (float)vel_i.x, vel.y = (float)vel_i.y; vel.z = (float)vel_i.z; feq = omega * rho * (1.0 + dot(u, vel) / RT0 + 0.5*dot(u, vel*dot(u, vel) / RT0 / RT0 - dot(u,u) / (2 * RT0))); return feq; } __device__ inline float Vec3_Norm(const float3 v) { // have to change to 'fabs' for 'typedef double real' float a = fabsf(v.x); float b = fabsf(v.y); float c = fabsf(v.z); if (a < b) { if (b < c) { return c*sqrtf(1 +pow(a/c,2) + pow(b/c,2)); } else // a < b, c <= b { return b*sqrtf(1 + pow(a/b,2) + pow(c/b,2)); } } else // b <= a { if (a < c) { return c*sqrtf(1 + pow(a / c,2) + pow(b / c,2)); } else // b <= a, c <= a { if (a != 0) { return a*sqrtf(1 + pow(b / a,2) + pow(c / a,2)); } else { return 0; } } } } __host__ __device__ float LBMheq(float3 u, float omega, float rho, int3 vel_i, float T,float RT0)//vel_i==e[qm][3] { float feq,heq,E; float3 vel; float cv, p0; vel.x = (float)vel_i.x, vel.y = (float)vel_i.y; vel.z = (float)vel_i.z; E = cv*T + dot(u, u) / 2.0; feq = omega * rho * (1.0 + dot(u, vel) / RT0 + 0.5*dot(u, vel*dot(u, vel) / RT0 / RT0 - dot(u, u) / (2 * RT0))); heq = omega * p0 * (dot(vel, u) / RT0 + dot(vel, u)*dot(vel, u) / RT0 / RT0 - dot(u, u) / RT0 + 0.5*(dot(vel, vel) / RT0 - 3.0)) + E*feq; return feq; } __device__ inline float CalcEpsilon(char mark, float rho, float mass) { if (mark & (TYPEFLUID | TYPEBOUNDARY | TYPESOLID)) { return 1; } else if (mark & TYPESURFACE) { assert( rho >= 0); if (rho > 0) { real epsilon = mass / rho; // df->mass can even be < 0 or > df->rho for interface cells to be converted to fluid or empty cells in the next step; // clamp to [0,1] range for numerical stability if (epsilon > 1) { epsilon = 1; } else if (epsilon < 0) { epsilon = 0; } return epsilon; } else { // return (somewhat arbitrarily) a ratio of 1/2 return (real)0.5; } } else // df->type & CT_EMPTY { assert(mark & TYPEVACUUM); return 0; } } __device__ inline float3 CalcLBMNormal(charray mark,farray rho, float* mass, int i,int j, int k) { float3 norm; norm.x = 0.5*(CalcEpsilon(mark[getidx(i - 1, j, k)], rho[getidx(i - 1, j, k)], mass[getidx(i - 1, j, k)]) - CalcEpsilon(mark[getidx(i + 1, j, k)], rho[getidx(i + 1, j, k)], mass[getidx(i + 1, j, k)])); norm.y = 0.5*(CalcEpsilon(mark[getidx(i, j - 
1, k)], rho[getidx(i, j - 1, k)], mass[getidx(i, j - 1, k)]) - CalcEpsilon(mark[getidx(i, j + 1, k)], rho[getidx(i, j + 1, k)], mass[getidx(i, j + 1, k)])); norm.z = 0.5*(CalcEpsilon(mark[getidx(i, j, k - 1)], rho[getidx(i, j, k - 1)], mass[getidx(i, j, k - 1)]) - CalcEpsilon(mark[getidx(i, j, k + 1)], rho[getidx(i, j, k + 1)], mass[getidx(i, j, k + 1)])); return norm; } __device__ inline float CalcMassExchange(char mark, char neighmark, float df_neigh, float dF_inv) { // Table 4.1 in Nils Thuerey's PhD thesis if (mark & TYPENOFLUIDNEIGH) { if (neighmark & TYPENOFLUIDNEIGH) return df_neigh - dF_inv; else // neighbor is standard cell or CT_NO_EMPTY_NEIGH return -dF_inv; } else if (mark & TYPENOEMPTYMEIGH) { if (neighmark & TYPENOEMPTYMEIGH) return df_neigh - dF_inv; else // neighbor is standard cell or CT_NO_FLUID_NEIGH return df_neigh; } else { // current cell is standard cell if(neighmark & TYPENOFLUIDNEIGH) return df_neigh; else if (neighmark & TYPENOEMPTYMEIGH) return -dF_inv; else// neighbor is standard cell return df_neigh - dF_inv; } } __device__ inline void LBMAverageSurrounding(charray mark, float * mass, float &rho, farray tmprho, farray df, float &ux, float &uy, float &uz,farray tmpux, farray tmpuy,farray tmpuz, int i, int j, int k,int Qm) { int q; int n = 0,ii,jj,kk,idxneigh; // set mass initially to zero mass[getidx(i,j,k)] = 0; rho = 0; ux = 0; uy = 0; uz = 0; float df_neigh[19] = { 0 }; for (q = 1; q < 19; q++)// omit zero vector { ii = i - LBM_dparam.vel_i[q].x; jj = j - LBM_dparam.vel_i[q].y; kk = k - LBM_dparam.vel_i[q].z; idxneigh = getidx(ii, jj, kk); // fluid or interface cells only if (mark[idxneigh] & (TYPEFLUID | TYPESOLID | TYPESURFACE)) { rho += tmprho(idxneigh); ux += tmpux(idxneigh); uy += tmpuy(idxneigh); uz += tmpuz(idxneigh); n++; } } if (n > 0) { rho /= n; ux /= n; uy /= n; uz /= n; } // calculate equilibrium distribution function for (Qm = 0; Qm < 19; Qm++) df[i,j,k,Qm] = LBMfeq(make_float3(ux, uy, uz), LBM_dparam.omega, rho, make_int3(LBM_dparam.vel_i[Qm].x, LBM_dparam.vel_i[Qm].y, LBM_dparam.vel_i[Qm].z), LBM_dparam.R*LBM_dparam.LBM_T0); } //*********************************LBM**************************************** void copyparamtoGPU(FlipConstant hparam) { checkCudaErrors(hipMemcpyToSymbol(dparam, &hparam, sizeof(FlipConstant))); } //*********************************************************************** void copyLBMparamtoGPU(LBMConstant hparam) { checkCudaErrors(hipMemcpyToSymbol(LBM_dparam, &hparam, sizeof(LBMConstant))); } //************************************************************ void LBMcopyparamtoGPU(FlipConstant hparam) { checkCudaErrors(hipMemcpyToSymbol(dparam, &hparam, sizeof(FlipConstant))); } void copyNXNYNZtoGPU(int nx, int ny, int nz) { checkCudaErrors(hipMemcpyToSymbol(NX, &nx, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(NY, &ny, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(NZ, &nz, sizeof(int))); } void copyNXNYNZtoGPU_MC(int nx, int ny, int nz) { checkCudaErrors(hipMemcpyToSymbol(NXMC, &nx, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(NYMC, &ny, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(NZMC, &nz, sizeof(int))); } __device__ inline void getijk(int &i, int &j, int &k, int &idx) { i = idx / (NZ*NY); j = idx / NZ%NY; k = idx%NZ; } __device__ inline void getijkfrompos(int &i, int &j, int &k, float3 pos) { pos = (pos - dparam.gmin) / dparam.cellsize; i = (pos.x >= 0 && pos.x<NX) ? ((int)pos.x) : 0; j = (pos.y >= 0 && pos.y<NY) ? ((int)pos.y) : 0; k = (pos.z >= 0 && pos.z<NZ) ? 
((int)pos.z) : 0; } __device__ inline void getijkfrompos(int &i, int &j, int &k, float3 pos, int w, int h, int d, float dx) { pos = (pos - dparam.gmin) / dx; i = (pos.x >= 0 && pos.x<w) ? ((int)pos.x) : 0; j = (pos.y >= 0 && pos.y<h) ? ((int)pos.y) : 0; k = (pos.z >= 0 && pos.z<d) ? ((int)pos.z) : 0; } __device__ inline int getidx(int i, int j, int k, int w, int h, int d) { return (i*h*d + j*d + k); } __device__ inline float getRfromMass(float m) { return pow(m*0.75f / M_PI / dparam.waterrho, 0.333333); } __device__ inline float getMassfromR(float r) { return dparam.waterrho*M_PI*4.0 / 3 * r*r*r; } // __global__ void cptdivergence(farray outdiv, farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); if (mark[idx] == TYPEFLUID) div = (ux(i + 1, j, k) - ux(i, j, k) + uy(i, j + 1, k) - uy(i, j, k) + uz(i, j, k + 1) - uz(i, j, k)) / h; outdiv[idx] = div; } } __device__ inline int clampidx(int i, int j, int k) { i = max(0, min(i, NX - 1)); j = max(0, min(j, NY - 1)); k = max(0, min(k, NZ - 1)); return (i*NZ*NY + j*NZ + k); } __device__ inline float trilinear(farray u, float x, float y, float z, int w, int h, int d) { x = fmaxf(0.0f, fminf(x, w)); y = fmaxf(0.0f, fminf(y, h)); z = fmaxf(0.0f, fminf(z, d)); int i = fminf(x, w - 2); int j = fminf(y, h - 2); int k = fminf(z, d - 2); return (k + 1 - z)*((j + 1 - y)*((i + 1 - x)*u(i, j, k) + (x - i)*u(i + 1, j, k)) + (y - j)*((i + 1 - x)*u(i, j + 1, k) + (x - i)*u(i + 1, j + 1, k))) + (z - k)*((j + 1 - y)*((i + 1 - x)*u(i, j, k + 1) + (x - i)*u(i + 1, j, k + 1)) + (y - j)*((i + 1 - x)*u(i, j + 1, k + 1) + (x - i)*u(i + 1, j + 1, k + 1))); } __device__ float3 getVectorFromGrid(float3 pos, farray phigrax, farray phigray, farray phigraz) { float3 res; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //ux,uy,uz(staggered grid) res.x = trilinear(phigrax, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); res.y = trilinear(phigray, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); res.z = trilinear(phigraz, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); return res; } __device__ float getScaleFromFrid(float3 pos, farray phi) { float res; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //ux,uy,uz(staggered grid) res = trilinear(phi, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); return res; } //Jacobi iteration: Ax=b //todo: check this function and maybe get another solver. __global__ void JacobiIter(farray outp, farray p, farray b, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float resp = 0, h = dparam.cellsize.x; float p1, p2, p3, p4, p5, p6; float p0 = p[idx]; int i, j, k; if (mark[idx] == TYPEFLUID) { getijk(i, j, k, idx); p1 = (mark(i + 1, j, k) == TYPEBOUNDARY) ? p0 : p(i + 1, j, k); p2 = (mark(i, j + 1, k) == TYPEBOUNDARY) ? p0 : p(i, j + 1, k); p3 = (mark(i, j, k + 1) == TYPEBOUNDARY) ? p0 : p(i, j, k + 1); p4 = (mark(i - 1, j, k) == TYPEBOUNDARY) ? p0 : p(i - 1, j, k); p5 = (mark(i, j - 1, k) == TYPEBOUNDARY) ? p0 : p(i, j - 1, k); p6 = (mark(i, j, k - 1) == TYPEBOUNDARY) ? 
p0 : p(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 - h*h*b(i, j, k)) / 6.0f; } outp[idx] = resp; } } __global__ void setPressBoundary(farray press) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == 0) press[idx] = press(i + 1, j, k); if (j == 0) press[idx] = press(i, j + 1, k); if (k == 0) press[idx] = press(i, j, k + 1); if (i == NX - 1) press[idx] = press(i - 1, j, k); if (j == NY - 1) press[idx] = press(i, j - 1, k); if (k == NZ - 1) press[idx] = press(i, j, k - 1); } } // __global__ void subGradPress(farray p, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; if (idx<dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k)) / h; } if (idx<dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k)) / h; } if (idx<dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1)) / h; } } __device__ float3 getParticleVelFromGrid(float3 pos, farray ux, farray uy, farray uz) { float3 vel; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //ux,uy,uz(staggered grid) vel.x = trilinear(ux, x, y - 0.5f, z - 0.5f, NX + 1, NY, NZ); vel.y = trilinear(uy, x - 0.5f, y, z - 0.5f, NX, NY + 1, NZ); vel.z = trilinear(uz, x - 0.5f, y - 0.5f, z, NX, NY, NZ + 1); return vel; } __global__ void mapvelg2p_flip(float3 *ppos, float3 *vel, char* parflag, int pnum, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float3 gvel = getParticleVelFromGrid(ipos, ux, uy, uz); vel[idx] += gvel; } } __device__ inline float sharp_kernel(float r2, float h) { return fmax(h*h / fmax(r2, 0.0001f) - 1.0f, 0.0f); } __global__ void mapvelp2g_slow(float3 *pos, float3 *vel, int pnum, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float w, weight, RE = 1.4, dis2, usum; float3 gpos; float scale = 1 / dparam.cellsize.x; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = 0; getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int p = 0; p<pnum; p++) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].x; } usum = (weight>0) ? (usum / weight) : 0.0f; ux(i, j, k) = usum; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = 0; getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int p = 0; p<pnum; p++) { dis2 = dot((pos[p] * scale) - gpos, (pos[p] * scale) - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].y; } usum = (weight>0) ? (usum / weight) : 0.0f; uy(i, j, k) = usum; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = 0; getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int p = 0; p<pnum; p++) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].z; } usum = (weight>0.00001) ? 
(usum / weight) : 0.0f; uz(i, j, k) = usum; } } __device__ inline bool verifycellidx(int i, int j, int k) { if (i<0 || i>NX - 1 || j<0 || j>NY - 1 || k<0 || k>NZ - 1) return false; return true; } __device__ inline bool verifycellidx(int i, int j, int k, int w, int h, int d) { if (i<0 || i>w - 1 || j<0 || j>h - 1 || k<0 || k>d - 1) return false; return true; } __global__ void addgravityforce_k(float3 *vel, char* parflag, int pnum, float dt) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEFLUID ) vel[idx] += dt*dparam.gravity; if ( parflag[idx] == TYPESOLID) vel[idx] += dt*dparam.gravity ; } } __global__ void addbuoyancyforce_k(float dheight, float3 *pos, float3 *vel, char* parflag, int pnum, float dt) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEAIR) vel[idx] -= dt*dparam.gravity * 1.1f; //todo: else if (parflag[idx] == TYPEAIRSOLO) vel[idx] -= dt*dparam.gravity * 1.1f; else if (parflag[idx] == TYPESOLID) vel[idx] -= dt*dparam.gravity * 0.5f; // else if(parflag[idx] == TYPESOLID && pos[idx].z <= dheight) // // vel[idx] -= dt*dparam.gravity * 0.2f; } } __global__ void addbuoyancyforce_vel(float velMax, float3 *pos, float3 *vel, char* parflag, int pnum, float dt, float buoyanceRateAir, float buoyanceRateSolo) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { float rate = fmax(velMax - vel[idx].z, 0.0f) / velMax; if (parflag[idx] == TYPEAIR) vel[idx].z -= dt*dparam.gravity.z * rate * buoyanceRateAir; //todo: else if (parflag[idx] == TYPEAIRSOLO) vel[idx].z -= dt*dparam.gravity.z *rate* buoyanceRateSolo; else if (parflag[idx] == TYPESOLID) vel[idx].z -= dt*dparam.gravity.z * .55f;//0.55f; //else if(parflag[idx] == TYPESOLID && pos[idx].z <= dheight) // // vel[idx] -= dt*dparam.gravity * 0.2f; } } __global__ void advectparticle(float3 *ppos, float3 *pvel, int pnum, farray ux, farray uy, farray uz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); //pos-->grid xyz float3 gvel; gvel = getParticleVelFromGrid(ipos, ux, uy, uz); //vel[idx] += dt*dparam.gravity; ipos += gvel*dt; if (velmode == CIP) ivel = gvel; else if (velmode == FLIP) ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //check boundary ipos.x = fmax(tmin.x, fmin(tmax.x, ipos.x)); ipos.y = fmax(tmin.y, fmin(tmax.y, ipos.y)); ipos.z = fmax(tmin.z, ipos.z); if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back pvel[idx] = ivel; ppos[idx] = ipos; } } __global__ void advectparticle_RK2(float3 *ppos, float3 *pvel, int pnum, farray ux, farray uy, farray uz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); //pos-->grid xyz float3 gvel; gvel = getParticleVelFromGrid(ipos, ux, uy, uz); if (velmode == CIP) ivel = gvel; else if (velmode == FLIP) ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //mid point: x(n+1/2) = x(n) + 0.5*dt*u(xn) float3 midpoint = ipos + gvel * dt * 0.5; float3 gvelmidpoint = 
getParticleVelFromGrid(midpoint, ux, uy, uz); // x(n+1) = x(n) + dt*u(x+1/2) ipos += gvelmidpoint * dt; //check boundary if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back if (parflag[idx] != TYPESOLID) { pvel[idx] = ivel; ppos[idx] = ipos; } else pvel[idx] = ivel; } } __global__ void flipAirVacuum(charray mark) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == TYPEVACUUM) mark[idx] = TYPEAIR; } } __global__ void markair(charray mark) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { mark[idx] = TYPEAIR; } } __global__ void markforsmoke(charray mark, farray spraydense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { /* if(spraydense[idx]>0 )*/ mark[idx] = TYPEFLUID; } } __global__ void markfluid(charray mark, float3 *pos, char *parflag, int pnum) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { int i, j, k; //todo: ???? Should spray particle count??? or should we have a more accurate mark method. if( parflag[idx]==TYPEFLUID) { getijkfrompos(i, j, k, pos[idx]); mark(i, j, k) = TYPEFLUID; // } } } //fluid particle __global__ void markfluid_dense(charray mark, float *parmass, char *parflag, int pnum, uint *gridstart, uint *gridend, int fluidParCntPerGridThres) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int cntfluidsolid = 0, cntair = 0; uint start = gridstart[idx]; uint end = gridend[idx]; if (start != CELL_UNDEF) { for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) cntfluidsolid++; else if (parflag[p] == TYPEAIR) cntair++; } } if (cntfluidsolid == 0 && cntair == 0) mark[idx] = TYPEVACUUM; else if (cntfluidsolid>cntair) mark[idx] = TYPEFLUID; else mark[idx] = TYPEAIR; } } //************************LBM***************************** //fluid particle __global__ void markfluid_LBM_Init(charray mark, float *parmass, char *parflag, int pnum, uint *gridstart, uint *gridend, int Thres)//surface { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { int cntfluidsolid = 0, cntair = 0; uint start = gridstart[idx]; uint end = gridend[idx]; if (start != CELL_UNDEF) { for (uint p = start; p < end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) cntfluidsolid++; else if (parflag[p] == TYPEAIR) //for now (181210), there is no air cell considered in LBM framework cntair++; } } if (cntfluidsolid == 0 ) mark[idx] = TYPEVACUUM; else if (cntfluidsolid >= Thres) // initial particle number per cell is 8 mark[idx] = TYPEFLUID; else mark[idx] = TYPESURFACE; // particle number [1,7] } } __global__ void markfluid_LBMdense(charray mark, float *parmass, char *parflag, int pnum, uint *gridstart, uint *gridend, int fluidParCntPerGridThres) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) //int j = 0; //for(int idx=0; idx<dparam.gnum; idx++) { int cntfluidsolid = 0, cntair = 0; uint start = gridstart[idx]; uint end = gridend[idx]; if (start != CELL_UNDEF) { for (uint p = start; p < end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) cntfluidsolid++; else if (parflag[p] == TYPEAIR) cntair++; } } if (cntfluidsolid == 0 && cntair == 
0) { mark[idx] = TYPEVACUUM; } else if (cntfluidsolid > cntair) { mark[idx] = TYPEFLUID; // printf("%d ", cntfluidsolid); } else mark[idx] = TYPEAIR; } } __global__ void markBoundaryCell(charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 || k == NZ - 1) mark[idx] = TYPEBOUNDARY; } } __global__ void setgridcolor_k(float* color, ECOLORMODE mode, farray p, farray ux, farray uy, farray uz, farray div, farray phi, charray mark, farray ls, farray tp, float sigma, float temperatureMax, float temperatureMin) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float3 rescolor = make_float3(0.0); int cellindex = NY / 2; if (mode == COLOR_PRESS) { if (j != cellindex || p[idx] == 0) rescolor = make_float3(0, 0, 1); else if (p[idx]>0) rescolor = make_float3(0, 1, 0); else if (p[idx]<0) rescolor = make_float3(1, 0, 0); //rescolor = mapColorBlue2Red( 30000*abs(p[idx]) ); } else if (mode == COLOR_UX) { if (j != cellindex || ux(i + 1, j, k) + ux(i, j, k)<0) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5*abs(ux(i + 1, j, k) + ux(i, j, k))); } else if (mode == COLOR_UY) { if (j != cellindex || uy(i, j + 1, k) + uy(i, j, k)<0) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5*abs(uy(i, j + 1, k) + uy(i, j, k))); } else if (mode == COLOR_UZ) { if (j != cellindex/*||uz(i,j,k+1)+uz(i,j,k)<0*/) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(5 * abs(uz(i, j, k))); } else if (mode == COLOR_DIV) { if (j != cellindex || div[idx] == 0) rescolor = make_float3(0, 0, 1); else if (div[idx]>0) rescolor = make_float3(0, 1, 0); else if (div[idx]<0) rescolor = make_float3(1, 1, 0); } else if (mode == COLOR_PHI) { if (phi[idx]>3 * NX - 1 || j != cellindex) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5f + phi[idx]); } else if (mode == COLOR_MARK) { if (j != cellindex) rescolor = make_float3(0, 0, 1); else { if (mark[idx] == TYPEAIR) rescolor = make_float3(0, 1, 0); else if (mark[idx] == TYPEFLUID) rescolor = make_float3(1, 0, 0); else if (mark[idx] == TYPEVACUUM) rescolor = make_float3(1, 1, 0); else if (mark[idx] == TYPEBOUNDARY) rescolor = make_float3(0, 1, 1); else rescolor = make_float3(0, 0, 1); //rescolor = mapColorBlue2Red( (int)(mark[idx])+1.0f ) ; } } else if (mode == COLOR_LS) { if (j == cellindex && ls[idx]>0) rescolor = mapColorBlue2Red(abs(ls[idx] / dparam.cellsize.x)); else rescolor = make_float3(0, 0, 1); } else if (mode == COLOR_TP) { if (j != cellindex || i == 0 || i == NX - 1 || k == 0 || k == NZ - 1) rescolor = make_float3(0, 0, 1); else // rescolor = mapColorBlue2Red( abs(tp[idx]*dparam.cellsize.x*5/sigma) ); //rescolor = mapColorBlue2Red( abs(tp[idx]-353)/5.0f ); rescolor = mapColorBlue2Red((tp[idx] - temperatureMin) / (temperatureMax - temperatureMin)*6.0f); } color[idx * 3] = rescolor.x; color[idx * 3 + 1] = rescolor.y; color[idx * 3 + 2] = rescolor.z; } } __host__ __device__ inline float3 mapColorBlue2Red(float v) { float3 color; if (v<0) return make_float3(0.0f, 0.0f, 1.0f); int ic = (int)v; float f = v - ic; switch (ic) { case 0: { color.x = 0; color.y = f / 2; color.z = 1; } break; case 1: { color.x = 0; color.y = f / 2 + 0.5f; color.z = 1; } break; case 2: { color.x = f / 2; color.y = 1; color.z = 1 - f / 2; } break; case 3: { color.x = f / 2 + 0.5f; color.y = 1; color.z = 0.5f - f / 2; } break; case 4: { color.x 
= 1; color.y = 1.0f - f / 2; color.z = 0; } break; case 5: { color.x = 1; color.y = 0.5f - f / 2; color.z = 0; } break; default: { color.x = 1; color.y = 0; color.z = 0; } break; } return color; } __global__ void initphi(farray phi, charray mark, char typeflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == typeflag) phi[idx] = -0.5; else phi[idx] = NX * 3; } } __global__ void initSolidPhi(farray phi, uint *gridstart, uint *gridend, char *pflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { bool flag = false; uint start = gridstart[idx]; if (start != CELL_UNDEF) { for (; start<gridend[idx]; start++) { if (pflag[start] == TYPESOLID) flag = true; } } if (flag) phi[idx] = -0.5f; else phi[idx] = 3 * NX; } } __device__ void solvedistance(float a, float b, float c, float &x) { float d = fmin(a, fmin(b, c)) + 1; if (d>fmax(a, fmax(b, c))) { d = (a + b + c + sqrt(3 - (a - b)*(a - b) - (a - c)*(a - c) - (b - c)*(b - c))) / 3; } if (d<x) x = d; } __global__ void sweepphi(farray phi) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float resphi = phi[idx]; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (verifycellidx(i + di, j, k) && verifycellidx(i, j + dj, k) && verifycellidx(i, j, k + dk)) solvedistance(phi(i + di, j, k), phi(i, j + dj, k), phi(i, j, k + dk), resphi); } phi[idx] = resphi; } } __global__ void sweepphibytype(farray phi, charray mark, char typeflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == typeflag) return; int i, j, k; getijk(i, j, k, idx); float resphi = phi[idx]; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (verifycellidx(i + di, j, k) && verifycellidx(i, j + dj, k) && verifycellidx(i, j, k + dk)) solvedistance(phi(i + di, j, k), phi(i, j + dj, k), phi(i, j, k + dk), resphi); } phi[idx] = resphi; } } __global__ void sweepu(farray outux, farray outuy, farray outuz, farray ux, farray uy, farray uz, farray phi, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; float wx, wy, wz, wsum; // if (idx < dparam.gvnum.x) { //copy outux[idx] = ux[idx]; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>1 && i<NX - 1 /*&& j>0 && j<N-1 && k>0 && k<N-1*/) { if ( (mark(i, j, k) == TYPEBOUNDARY && mark(i - 1, j, k) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (j + dj<0 || j + dj>NY - 1 || k + dk<0 || k + dk >NZ - 1) continue; wx = -di*(phi(i, j, k) - phi(i - 1, j, k)); if (wx<0) continue; wy = (phi(i, j, k) + phi(i - 1, j, k) - phi(i, j + dj, k) - phi(i - 1, j + dj, k))*0.5f; if (wy<0) continue; wz = (phi(i, j, k) + phi(i - 1, j, k) - phi(i, j, k + dk) - phi(i - 1, j, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outux(i, j, k) = wx*ux(i + di, j, k) + wy* ux(i, j + dj, k) + wz* ux(i, j, k + dk); } } } if (idx < dparam.gvnum.y) { //copy outuy[idx] = uy[idx]; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if ( /*i>0 && i<N-1 &&*/ j>1 && j<NY - 1 /*&& k>0 && k<N-1*/) { if ( (mark(i, j, k) == TYPEBOUNDARY && mark(i, j - 1, k) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i 
+ di<0 || i + di>NX - 1 || k + dk<0 || k + dk >NZ - 1) continue; wy = -dj*(phi(i, j, k) - phi(i, j - 1, k)); if (wy<0) continue; wx = (phi(i, j, k) + phi(i, j - 1, k) - phi(i + di, j, k) - phi(i + di, j - 1, k))*0.5f; if (wx<0) continue; wz = (phi(i, j, k) + phi(i, j - 1, k) - phi(i, j, k + dk) - phi(i, j - 1, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuy(i, j, k) = wx*uy(i + di, j, k) + wy* uy(i, j + dj, k) + wz* uy(i, j, k + dk); } } } if (idx < dparam.gvnum.z) { //copy outuz[idx] = uz[idx]; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if ( /*i>0 && i<N-1 && j>0 && j<N-1 &&*/ k>1 && k<NZ - 1) { if ( (mark(i, j, k) == TYPEBOUNDARY && mark(i, j, k - 1) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di >NX - 1 || j + dj<0 || j + dj>NY - 1) continue; wz = -dk*(phi(i, j, k) - phi(i, j, k - 1)); if (wz<0) continue; wy = (phi(i, j, k) + phi(i, j, k - 1) - phi(i, j + dj, k) - phi(i, j + dj, k - 1))*0.5f; if (wy<0) continue; wx = (phi(i, j, k) + phi(i, j, k - 1) - phi(i + di, j, k) - phi(i + di, j, k - 1))*0.5f; if (wx<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuz(i, j, k) = wx*uz(i + di, j, k) + wy* uz(i, j + dj, k) + wz* uz(i, j, k + dk); } } } } __global__ void setSmokeBoundaryU_k(farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; if (idx < dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); { if (i <= 1 || i >= ux.xn - 2) ux(i, j, k) = 0.0f; else if (j == 0) ux(i, j, k) = ux(i, j + 1, k); else if (j == NY - 1) ux(i, j, k) = ux(i, j - 1, k); else if (k == 0) ux(i, j, k) = ux(i, j, k + 1); else if (k == NZ - 1) ux(i, j, k) = ux(i, j, k - 1); else if (i>1 && i<NX - 1 && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i - 1, j, k) == TYPEBOUNDARY))) ux(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); { if (j <= 1 || j >= uy.yn - 2) uy(i, j, k) = 0.0f; else if (i == 0) uy(i, j, k) = uy(i + 1, j, k); else if (i == NX - 1) uy(i, j, k) = uy(i - 1, j, k); else if (k == 0) uy(i, j, k) = uy(i, j, k + 1); else if (k == NZ - 1) uy(i, j, k) = uy(i, j, k - 1); else if (j>0 && j<NY && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j - 1, k) == TYPEBOUNDARY))) uy(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); { if (k <= 1 || k >= uz.zn - 2) uz(i, j, k) = 0.0f; else if (i == 0) uz(i, j, k) = uz(i + 1, j, k); else if (i == NX - 1) uz(i, j, k) = uz(i - 1, j, k); else if (j == 0) uz(i, j, k) = uz(i, j + 1, k); else if (j == NY - 1) uz(i, j, k) = uz(i, j - 1, k); else if (k>0 && k<NZ && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j, k - 1) == TYPEBOUNDARY))) uz(i, j, k) = 0.0f; } } } __global__ void setWaterBoundaryU_k(farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; if (idx < dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); { if (i <= 1 || i >= ux.xn - 2) ux(i, j, k) = 0.0f; else if (i>1 && i<NX - 1 && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i - 1, j, k) == TYPEBOUNDARY))) ux(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); { if (j <= 1 || j >= uy.yn - 2) uy(i, j, k) = 0.0f; else if (j>0 && j<NY && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j - 1, k) == 
TYPEBOUNDARY))) uy(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); { if (k <= 1 || k >= uz.zn - 1) //ceiling uz(i, j, k) = 0.0f; else if (k == uz.zn - 2) //ceiling. uz(i, j, k) = (uz(i, j, k - 1)<0) ? (uz(i, j, k - 1)) : 0; else if (k>0 && k<NZ && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j, k - 1) == TYPEBOUNDARY))) uz(i, j, k) = 0.0f; } } } __global__ void computeDeltaU(farray ux, farray uy, farray uz, farray uxold, farray uyold, farray uzold) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) uxold[idx] = ux[idx] - uxold[idx]; if (idx < dparam.gvnum.y) uyold[idx] = uy[idx] - uyold[idx]; if (idx < dparam.gvnum.z) uzold[idx] = uz[idx] - uzold[idx]; } // From CUDA SDK: calculate grid hash value for each particle __global__ void calcHashD(uint* gridParticleHash, // output uint* gridParticleIndex, // output float3* pos, // input: positions uint numParticles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 p = pos[index]; // get address in grid int i, j, k; getijkfrompos(i, j, k, p); int gridindex = getidx(i, j, k); // store grid hash and particle index gridParticleHash[index] = gridindex; gridParticleIndex[index] = index; } // From CUDA SDK: calculate grid hash value for each particle __global__ void calcHashD_MC(uint* gridParticleHash, // output uint* gridParticleIndex, // output float3* pos, // input: positions uint numParticles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 p = pos[index]; // get address in grid int i, j, k; getijkfrompos(i, j, k, p, NXMC, NYMC, NZMC, dparam.cellsize.x / NXMC*NX); int gridindex = getidx(i, j, k, NXMC, NYMC, NZMC); // store grid hash and particle index gridParticleHash[index] = gridindex; gridParticleIndex[index] = index; } // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStartD(uint* cellStart, // output: cell start index uint* cellEnd, // output: cell end index float3* sortedPos, // output: sorted positions float3* sortedVel, // output: sorted velocities char* sortedflag, float* sortedmass, float* sortedTemperature, float* sortedheat, float* sortedsolubility, float* sortedgascontain, uint * gridParticleHash, // input: sorted grid hashes uint * gridParticleIndex,// input: sorted particle indices float3* oldPos, // input: sorted position array float3* oldVel, // input: sorted velocity array char* oldflag, float* oldmass, float* oldtemperature, float* oldheat, float* oldsolubility, float* oldgascontain, uint numParticles) { extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index - 1]; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; //no. hash 's grid cellstart is index if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleIndex[index]; float3 pos = oldPos[sortedIndex]; // macro does either global read or texture fetch float3 vel = oldVel[sortedIndex]; // see particles_kernel.cuh sortedPos[index] = pos; sortedVel[index] = vel; sortedflag[index] = oldflag[sortedIndex]; sortedmass[index] = oldmass[sortedIndex]; sortedTemperature[index] = oldtemperature[sortedIndex]; sortedheat[index] = oldheat[sortedIndex]; sortedsolubility[index] = oldsolubility[sortedIndex]; sortedgascontain[index] = oldgascontain[sortedIndex]; } } __global__ void advectux(farray outux, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, ux.xn, ux.yn, ux.zn); float3 pos = make_float3(i, j + 0.5, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX || j == NY - 1 || k == NZ - 1) outux[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = ux[idx]; vel.y = (uy(i - 1, j, k) + uy(i - 1, j + 1, k) + uy(i, j, k) + uy(i, j + 1, k))*0.25f; vel.z = (uz(i - 1, j, k) + uz(i - 1, j, k + 1) + uz(i, j, k) + uz(i, j, k + 1))*0.25f; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(ux, oldpos.x, oldpos.y - 0.5f, oldpos.z - 0.5f, ux.xn, ux.yn, ux.zn); outux[idx] = oldu * velocitydissipation; } } } __global__ void advectuy(farray outuy, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.y) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, uy.xn, uy.yn, uy.zn); float3 pos = make_float3(i + 0.5, j, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY || k == NZ - 1) outuy[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j - 1, k) + ux(i + 1, j - 1, k) + ux(i, j, k) + ux(i + 1, j, k))*0.25f; vel.y = uy[idx]; vel.z = (uz(i, j - 1, k) + uz(i, j - 1, k + 1) + uz(i, j, k) + uz(i, j, k + 1))*0.25f; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(uy, oldpos.x - 0.5f, oldpos.y, oldpos.z - 0.5f, uy.xn, uy.yn, uy.zn); outuy[idx] = oldu * velocitydissipation; } } } __global__ void advectuz(farray outuz, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, uz.xn, uz.yn, uz.zn); float3 pos = make_float3(i + 0.5, j + 0.5, k); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY - 1 || k == NZ) outuz[idx] = 0; else { //get this point's vel, for tracing back. 
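// (the uz sample sits on a staggered w-face, so the missing x/y components are averaged from the
//  four surrounding ux/uy face samples; the position is then traced backwards along this velocity
//  and uz is re-sampled there - a standard semi-Lagrangian advection step)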
float3 vel; vel.x = (ux(i, j, k - 1) + ux(i + 1, j, k - 1) + ux(i, j, k) + ux(i + 1, j, k))*0.25f; vel.y = (uy(i, j, k - 1) + uy(i, j + 1, k - 1) + uy(i, j, k) + uy(i, j + 1, k))*0.25f; vel.z = uz[idx]; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(uz, oldpos.x - 0.5f, oldpos.y - 0.5f, oldpos.z, uz.xn, uz.yn, uz.zn); //float oldu = -dparam.dt*3.8f; outuz[idx] = oldu * velocitydissipation; } } } __global__ void advectscaler(farray outscalar, farray scalar, farray ux, farray uy, farray uz, float densedissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { //get pos of ux point int i, j, k; getijk(i, j, k, idx); float3 pos = make_float3(i + 0.5, j + 0.5, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY - 1 || k == NZ - 1) outscalar[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j, k) + ux(i + 1, j, k))*0.5f; vel.y = (uy(i, j, k) + uy(i, j + 1, k))*0.5f; vel.z = (uz(i, j, k) + uz(i, j, k + 1))*0.5f; //enforce wind as an external velocity field. vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float olds = trilinear(scalar, oldpos.x - 0.5f, oldpos.y - 0.5f, oldpos.z - 0.5f, NX, NY, NZ); outscalar[idx] = olds * densedissipation; } } } __global__ void setsmokedense(farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { int i, j, k; getijk(i, j, k, idx, dense.xn, dense.yn, dense.zn); if (i>28 && i<36 && j>28 && j<36 && k<6) dense[idx] = dparam.m0*6.0f; } } __global__ void setsmokevel(farray uz, farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { int i, j, k; getijk(i, j, k, idx, uz.xn, uz.yn, uz.zn); // if( i>20 && i<40 && j>20 && j<40 && k<10 ) // uz[idx] = 4.0f; // if( k>1 && k<NZ-1 ) // if( dense(i,j,k-1)>0 ) // uz[idx] = 4.0f; if (k>1 && k<NZ - 1) { float alpha = 1000.0f; uz(i, j, k) += alpha * dense(i, j, k - 1); } } } __global__ void setsmokevel_nozzle(farray ux, farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) { int i, j, k; getijk(i, j, k, idx, ux.xn, ux.yn, ux.zn); // if( i>20 && i<40 && j>20 && j<40 && k<10 ) // uz[idx] = 4.0f; //float alpha = 10000.0f; if (i>1 && i<NX - 1) if (dense(i - 1, j, k)>0) ux[idx] = 8.0f; //uz(i,j,k) += alpha * dense(i,j,k-1); } } surface<void, cudaSurfaceType3D> surfaceWrite; __global__ void writedens2surface_k(farray dens) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); // float4 idens = make_float4( 0.0f ); // if(i>10&&i<50 &&j>10&&j<50&&k>10&&k<50 ) // idens = make_float4( 1.0f ); float4 idens = make_float4(dens[idx] * 10000); surf3Dwrite(idens, surfaceWrite, i*sizeof(float4), j, k); //why *sizeof(float4)? 
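// (surf3Dwrite addresses the x coordinate in bytes rather than elements, so the column index must
//  be scaled by sizeof(float4); the y and z coordinates stay in element units)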
} } void writedens2surface(hipArray* cudaarray, int blocknum, int threadnum, farray dense) { hipBindSurfaceToArray(surfaceWrite, cudaarray); //kernel writedens2surface_k << <blocknum, threadnum >> >(dense); } __device__ float smooth_kernel(float r2, float h) { return fmax(1.0f - r2 / (h*h), 0.0f); } __device__ float3 sumcellspring(float3 ipos, float3 *pos, float* pmass, char* parflag, uint *gridstart, uint *gridend, int gidx, float idiameter) { if (gridstart[gidx] == CELL_UNDEF) return make_float3(0.0f); uint start = gridstart[gidx]; uint end = gridend[gidx]; float dist, w; float3 spring = make_float3(0.0f); float r = 0; for (uint p = start; p<end; ++p) { //if( parflag[p]!=TYPESOLID ) //solid { dist = length(pos[p] - ipos); r = idiameter;//+getRfromMass( pmass[p] ); w = pmass[p] * smooth_kernel(dist*dist, r); if (dist>0.1f*idiameter) // spring += w*(ipos - pos[p]) / dist; } } return spring; } __global__ void correctparticlepos(float3* outpos, float3* ppos, float *pmass, char* parflag, int pnum, uint* gridstart, uint *gridend, float correctionspring, float correctionradius, float3 *pepos, float *peradius, int penum) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPESOLID/* || parflag[idx]==TYPEAIR*/ || parflag[idx] == TYPEAIRSOLO) { outpos[idx] = ppos[idx]; return; } float3 ipos = ppos[idx]; int i, j, k; getijkfrompos(i, j, k, ipos); float3 spring = make_float3(0.0f); float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float re = correctionradius*dparam.cellsize.x; // float re= getRfromMass( pmass[idx] ); int lv = 1; // float idiameter = 2*pow(0.75*pmass[idx]/dparam.waterrho/M_PI, 1.0/3); //SPH for (int di = -lv; di <= lv; di++) for (int dj = -lv; dj <= lv; dj++) for (int dk = -lv; dk <= lv; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { spring += sumcellspring(ipos, ppos, pmass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk), re); } } // //emptyempty // float w, dist; // for( int p=0; p<penum; p++ ) // { // if( peradius[p]>0.5f*dparam.cellsize.x ) // // { // dist=length(pepos[p]-ipos); // w = pmass[idx]*smooth_kernel(dist*dist, peradius[p]); // // if( dist>0.1f*peradius[p] ) // // spring += w*(ipos-pepos[p]) / dist; // } // } spring *= correctionspring*re; if (length(dparam.dt*spring)>0.3f*dparam.cellsize.x) ipos += dparam.cellsize.x * 0.3f * spring / length(spring); else ipos += dparam.dt*spring; ipos.x = fmax(tmin.x, fmin(tmax.x, ipos.x)); ipos.y = fmax(tmin.y, fmin(tmax.y, ipos.y)); ipos.z = fmax(tmin.z, fmin(tmax.z, ipos.z)); outpos[idx] = ipos; } } __device__ void sumcelldens(float &phi, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) { dis = length(pos[p] - gpos); if (phi>dis) phi = dis; } } } //MC //[2012][TVCG]Preserving Fluid Sheets with Adaptively Sampled Anisotropic Particles __global__ void genWaterDensfield(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NX + 1)*(NY + 1)*(NZ + 1)) { float h = dparam.cellsize.x; float phi = 8 * fMCDensity*h; //from flip3d_vs //get position int i, j, k; getijk(i, j, k, idx, NX + 1, NY + 1, NZ + 1); float3 p = 
make_float3(i, j, k)*h; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk)) { sumcelldens(phi, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); } } phi = fMCDensity*h - phi; if (i*j*k == 0 || i == NX || j == NY || k == NZ) phi = fmin(phi, -0.1f); outdens[idx] = phi; } } __device__ float3 sumcelldens2(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, char MCParType) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == MCParType) { dis = length(pos[p] - gpos); if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //MC //[2012]CGFParallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield2(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, char MCParType) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. float r = 1.0f*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; // float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens2(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, MCParType); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens_Gas(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, SCENE scene) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR || (parflag[p] == TYPEAIRSOLO && scene != SCENE_INTERACTION)) { dis = length(pos[p] - gpos); if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //MC //[2012]CGFParallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield_Gas(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. 
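// (as in the cited [2012 CGF] reconstruction, the loop below accumulates a kernel-weighted centroid
//  of the nearby gas particles and then sets phi = r - |p - center|, so the zero level set lies
//  about one particle radius away from the averaged particle centre)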
float r = 0.8f*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; // float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens_Gas(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, scene); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens_liquidAndGas(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, float sradiusInv, float radius, float racc,float wacc, float3 pacc) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; //float r = R / 2.; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR || parflag[p] == TYPEAIRSOLO || parflag[p] == TYPEFLUID) { dis = length(pos[p] - gpos); // { // float s = dot(pos[p] - gpos, pos[p] - gpos)*sradiusInv;//mantaflow // w = max(0., (1. - s)); // wacc += w; // racc += radius * w; // pacc += pos[p] * w; // // } if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //MC //[2012]CGFParallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield_liquidAndGas(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. //float r = 2.5f*sqrt(3.)*1.01*0.5*h; //mantaFlow flip03_gen float r = 0.75*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); //mantaflow //float racc, wacc; //float3 pacc = make_float3(0.); // float phiv = r; // sradiusInv = 1. / (4. *r * r); // int radius = int(1. * r) + 1; // float3 gridPos = make_float3(i + 0.5, j + 0.5, k + 0.5)* h; float3 p = make_float3(i, j, k)* h; // float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens_liquidAndGas(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, sradiusInv, r,racc,wacc,pacc); // printf("%f !!!!", pacc.x); ///////////////////////// // racc /= wacc; // pacc /= wacc; // phiv = fabs(length(gridPos-pacc)); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. 
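// (grid nodes on the domain boundary are forced strongly negative below so the marching-cubes
//  surface is capped just inside the walls instead of running out of the grid)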
// phi = phiv; //mantaflow if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens3(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float h, char MCParType) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == MCParType) { //GY:CFG2012Parallel Surface Reconstruction for Particle-Based Fluids // [2007CAVW]A Unified Particle Model for Fluid-Solid Interactions // 2012 VRIPHYSAn Efficient Surface Reconstruction Pipeline for Particle-Based Fluids dis = length(pos[p] - gpos); //v-xi if (dis<h) { // w = h*h -dis*dis; // // w = w*w*w; // res += pos[p] * w; // wsum += w; w = dis / (4 * h); // |v-xi|/R [2007 CAVW] R=2h=4r w = 1 - w*w; // 1-s~2 w = max(w*w*w, 0.0); // k(s) res += pos[p] * w; wsum += w; } } } return res; } //MC //[2012]VRIPHYSAn Efficient Surface Reconstruction Pipeline for Particle-Based Fluids __global__ void genWaterDensfield_GY(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, char MCParType, float3 centertmp) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. float r = 0.75f*h; float thigh = 0.51; float tlow = 0.49; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; // float3 center = make_float3(0.0f); float wsum = 0.0f; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens3(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h, MCParType); } } if (wsum>0) { center /= wsum; //~v float3 delta = center - centertmp; float Ev = max(delta.x, max(delta.y, delta.z)) / (4 * h); // // float Ev = 3.8; centertmp = center; // centertmp:center Evdelta float gamma = (thigh - Ev) / (thigh - tlow); float f = (Ev<tlow) ? 1 : gamma*gamma*gamma - 3 * gamma*gamma + 3 * gamma; // phi = r - length( p - center ); phi = (length(p - center) - r*f); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = fmin(phi, -10.0f); outdens[idx] = phi; } } __global__ void markSolid_sphere(float3 spherepos, float sphereradius, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if ((i>NX/2-2) &&i<2.5*NX/3 && j>3.5*NY/9 && j< 6*NY/9 && k<NZ/5) mark[idx] = TYPEBOUNDARY; } } __global__ void markSolid_waterfall(int3 minpos, int3 maxpos, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int x, y, z; getijk(x, y, z, idx); if (x <= maxpos.x && (y >= maxpos.y || y <= minpos.y) && z <= maxpos.z) mark[idx] = TYPEBOUNDARY; else if (x <= maxpos.x && (y>minpos.y || y<maxpos.y) && z <= minpos.z) mark[idx] = TYPEBOUNDARY; } } //a trick part. 
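//---------------------------------------------------------------------------------------------
// Illustrative host-side sketch of how these one-thread-per-cell marking kernels are launched;
// the wrapper name, the block size of 256 and the explicit cell-count argument are assumptions
// for this example only.
static void launchMarkSolidSphere(float3 spherepos, float sphereradius, charray mark, int gcellnum)
{
	int threadnum = 256;	// assumed block size
	int blocknum = (gcellnum + threadnum - 1) / threadnum;	// one thread per grid cell
	markSolid_sphere << <blocknum, threadnum >> >(spherepos, sphereradius, mark);
	checkCudaErrors(hipGetLastError());
}
//---------------------------------------------------------------------------------------------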
__global__ void markSolid_waterfall_liquid(int3 minpos, int3 maxpos, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int x, y, z; getijk(x, y, z, idx); if (x <= maxpos.x && (y >= maxpos.y || y <= minpos.y) && z <= maxpos.z*0.7f) mark[idx] = TYPEBOUNDARY; else if (x <= maxpos.x && (y>minpos.y || y<maxpos.y) && z <= minpos.z*0.7f) mark[idx] = TYPEBOUNDARY; } } //a trick part. __global__ void markSolid_terrain(charray mark, charray mark_terrain) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { if (mark_terrain[idx] == TYPEBOUNDARY) mark[idx] = TYPEBOUNDARY; } } //MC __global__ void genSphereDensfield(farray outdens, float3 center, float radius) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { //float3 center = make_float3(0.5f); float phi; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -0.1; else { float3 p = make_float3(i, j, k)*dparam.cellsize.x / (NXMC / NX); phi = radius - length(p - center); } outdens[idx] = phi; } } //-----MC from cuda sdk 4.2 // classify voxel based on number of vertices it will generate // one thread per voxel (cell) __global__ void classifyVoxel(uint* voxelVerts, uint *voxelOccupied, farray volume, float isoValue) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<NXMC*NYMC*NZMC) { int i, j, k; getijk(i, j, k, idx, NXMC, NYMC, NZMC); float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate flag indicating if each vertex is inside or outside isosurface uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // read number of vertices from texture uint numVerts = tex1Dfetch(numVertsTex, cubeindex); voxelVerts[idx] = numVerts; voxelOccupied[idx] = (numVerts > 0); }//endif } // compact voxel array __global__ void compactVoxels(uint *compactedVoxelArray, uint *voxelOccupied, uint *voxelOccupiedScan, uint numVoxels) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint i = __mul24(blockId, blockDim.x) + threadIdx.x; if (voxelOccupied[i] && (i < numVoxels)) { compactedVoxelArray[voxelOccupiedScan[i]] = i; } } // compute interpolated vertex along an edge __device__ float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1) { float t = (isolevel - f0) / (f1 - f0); return lerp(p0, p1, t); } // calculate triangle normal __device__ float3 calcNormal(float3 *v0, float3 *v1, float3 *v2) { float3 edge0 = *v1 - *v0; float3 edge1 = *v2 - *v0; // note - it's faster to perform normalization in vertex shader rather than here return cross(edge0, edge1); } __device__ int GetVertexID(int i, int j, int k) { return 3 * (i*(NZMC + 1)*(NYMC + 1) + j*(NZMC + 1) + k); } __device__ int GetEdgeID(int nX, int nY, int nZ, int edge) { // return GetVertexID( nX,nY,nZ ); switch (edge) { case 0: return GetVertexID(nX, nY, nZ) + 1; case 1: return GetVertexID(nX + 1, nY, nZ); case 
2: return GetVertexID(nX, nY + 1, nZ) + 1; case 3: return GetVertexID(nX, nY, nZ); case 4: return GetVertexID(nX, nY, nZ + 1) + 1; case 5: return GetVertexID(nX + 1, nY, nZ + 1); case 6: return GetVertexID(nX, nY + 1, nZ + 1) + 1; case 7: return GetVertexID(nX, nY, nZ + 1); case 8: return GetVertexID(nX, nY, nZ) + 2; case 9: return GetVertexID(nX + 1, nY, nZ) + 2; case 10: return GetVertexID(nX + 1, nY + 1, nZ) + 2; case 11: return GetVertexID(nX, nY + 1, nZ) + 2; default: // Invalid edge no. return -1; } } // version that calculates flat surface normal for each triangle __global__ void generateTriangles2(float3 *pos, float3 *norm, uint *compactedVoxelArray, uint *numVertsScanned, farray volume, float isoValue, uint activeVoxels, uint maxVerts) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; float3 voxelSize = dparam.cellsize / (NXMC / NX); // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float3 p; p.x = i*voxelSize.x; p.y = j*voxelSize.y; p.z = k*voxelSize.z; float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate cell vertex positions float3 v[8]; v[0] = p; v[1] = p + make_float3(voxelSize.x, 0, 0); v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0); v[3] = p + make_float3(0, voxelSize.y, 0); v[4] = p + make_float3(0, 0, voxelSize.z); v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z); v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z); v[7] = p + make_float3(0, voxelSize.y, voxelSize.z); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // find the vertices where the surface intersects the cube // use shared memory to avoid using local __shared__ float3 vertlist[12 * NTHREADS]; vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]); vertlist[NTHREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]); vertlist[(NTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]); vertlist[(NTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]); vertlist[(NTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]); vertlist[(NTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]); vertlist[(NTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]); vertlist[(NTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]); vertlist[(NTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]); vertlist[(NTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]); vertlist[(NTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]); vertlist[(NTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]); 
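// (each thread has now stored the 12 interpolated edge vertices of its voxel in shared memory,
//  strided by NTHREADS so a thread owns slots edge*NTHREADS + threadIdx.x; this keeps the
//  per-thread vertex list out of slow local memory, as noted above)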
__syncthreads(); // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); for (int idx2 = 0; idx2<numVerts; idx2 += 3) { uint index = numVertsScanned[voxel] + idx2; float3 *v[3]; uint edge; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2); v[0] = &vertlist[(edge*NTHREADS) + threadIdx.x]; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 1); v[1] = &vertlist[(edge*NTHREADS) + threadIdx.x]; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 2); v[2] = &vertlist[(edge*NTHREADS) + threadIdx.x]; // calculate triangle surface normal float3 n = calcNormal(v[0], v[1], v[2]); /*if (index < (maxVerts - 3)) */{ pos[index] = *v[0]; norm[index] = n; pos[index + 1] = *v[1]; norm[index + 1] = n; pos[index + 2] = *v[2]; norm[index + 2] = n; } } } // version that calculates flat surface normal for each triangle __global__ void generateTriangles_indices(float3 *pTriVertex, uint *pTriIndices, uint *compactedVoxelArray, farray volume, float isoValue, uint activeVoxels, uint maxVerts, uint *MCEdgeIdxMapped, uint *numVertsScanned) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; float3 voxelSize = dparam.cellsize / (NXMC / NX); // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float3 p; p.x = i*voxelSize.x; p.y = j*voxelSize.y; p.z = k*voxelSize.z; float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate cell vertex positions float3 v[8]; v[0] = p; v[1] = p + make_float3(voxelSize.x, 0, 0); v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0); v[3] = p + make_float3(0, voxelSize.y, 0); v[4] = p + make_float3(0, 0, voxelSize.z); v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z); v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z); v[7] = p + make_float3(0, voxelSize.y, voxelSize.z); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // find the vertices where the surface intersects the cube // use shared memory to avoid using local __shared__ float3 vertlist[12 * NTHREADS]; vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]); vertlist[NTHREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]); vertlist[(NTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]); vertlist[(NTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]); vertlist[(NTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]); vertlist[(NTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]); vertlist[(NTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]); vertlist[(NTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]); vertlist[(NTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]); 
vertlist[(NTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]); vertlist[(NTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]); vertlist[(NTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]); __syncthreads(); // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); uint edge, mappededgeidx; for (int idx2 = 0; idx2<numVerts; idx2 += 3) { uint index = numVertsScanned[voxel] + idx2; //vertex index to write back, sort by each triangle. //triangle edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 1); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index + 1] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 2); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index + 2] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); } } __global__ void markActiveEdge_MC(uint *outmark, uint *compactedVoxelArray, farray volume, float isoValue, uint activeVoxels) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); uint edge; for (int idxVert = 0; idxVert<numVerts; idxVert++) { //outmark0 edge = tex1Dfetch(triTex, (cubeindex * 16) + idxVert); outmark[GetEdgeID(i, j, k, edge)] = 1; } //debug // for( int edge=0; edge<12; edge++ ) // outmark[GetEdgeID(i,j,k,edge)] = 1; } // __global__ void calnormal_k(float3 *ppos, float3 *pnor, int pnum, uint *indices, int indicesnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < indicesnum / 3) //face number { int i1 = indices[idx * 3 + 0]; int i2 = indices[idx * 3 + 1]; int i3 = indices[idx * 3 + 2]; float3 p1 = ppos[i1]; float3 p2 = ppos[i2]; float3 p3 = ppos[i3]; //compute float3 nor = cross(p2 - p1, p3 - p1); //write back atomicAdd(&pnor[i1].x, nor.x); atomicAdd(&pnor[i2].x, nor.x); atomicAdd(&pnor[i3].x, nor.x); atomicAdd(&pnor[i1].y, nor.y); atomicAdd(&pnor[i2].y, nor.y); atomicAdd(&pnor[i3].y, nor.y); atomicAdd(&pnor[i1].z, nor.z); atomicAdd(&pnor[i2].z, nor.z); atomicAdd(&pnor[i3].z, nor.z); } } // __global__ void normalizeTriangleNor_k(float3 *pnor, int pnum) { int 
idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx < pnum)	//vertex number
	{
		if (length(pnor[idx])>0)
			pnor[idx] = normalize(pnor[idx]);
	}
}

void allocateTextures(uint **d_edgeTable, uint **d_triTable, uint **d_numVertsTable)
{
	checkCudaErrors(hipMalloc((void**)d_edgeTable, 256 * sizeof(uint)));
	checkCudaErrors(hipMemcpy((void *)*d_edgeTable, (void *)edgeTable, 256 * sizeof(uint), hipMemcpyHostToDevice));
	hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindUnsigned);
	checkCudaErrors(hipBindTexture(0, edgeTex, *d_edgeTable, channelDesc));

	checkCudaErrors(hipMalloc((void**)d_triTable, 256 * 16 * sizeof(uint)));
	checkCudaErrors(hipMemcpy((void *)*d_triTable, (void *)triTable, 256 * 16 * sizeof(uint), hipMemcpyHostToDevice));
	checkCudaErrors(hipBindTexture(0, triTex, *d_triTable, channelDesc));

	checkCudaErrors(hipMalloc((void**)d_numVertsTable, 256 * sizeof(uint)));
	checkCudaErrors(hipMemcpy((void *)*d_numVertsTable, (void *)numVertsTable, 256 * sizeof(uint), hipMemcpyHostToDevice));
	checkCudaErrors(hipBindTexture(0, numVertsTex, *d_numVertsTable, channelDesc));
}

//block-wise dot product: each block reduces x[i]*y[i] in shared memory and writes one partial sum to
//out[blockIdx.x]; the resulting 1*nout partial sums are summed on the CPU.
__global__ void arrayproduct_k(float* out, float* x, float *y, int n)
{
	extern __shared__ float sdata[];
	uint tid = threadIdx.x;
	uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;

	sdata[tid] = (i >= n) ? 0 : (x[i] * y[i]);
	__syncthreads();
	for (int s = blockDim.x / 2; s>0; s >>= 1)
	{
		if (tid<s)
			sdata[tid] += sdata[tid + s];
		__syncthreads();
	}
	if (tid == 0)
		out[blockIdx.x] = sdata[0];
}

//z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation.
__global__ void computeAx(farray ans, charray mark, farray x, int n)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<n)
	{
		if (mark[idx] == TYPEFLUID)	//todo: should add typesolid or not.
		{
			int i, j, k;
			getijk(i, j, k, idx);
			float center = x[idx];
			float sum = -6.0f*center;
			float h2_rev = dparam.cellsize.x*dparam.cellsize.x;
			//notice: x is 0 in AIR cells; a BOUNDARY neighbour mirrors the center value.
			sum += (mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k);
			sum += (mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k);
			sum += (mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1);
			sum += (mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k);
			sum += (mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k);
			sum += (mark(i, j, k - 1) == TYPEBOUNDARY) ?
center : x(i, j, k - 1); ans[idx] = sum / h2_rev; } else ans[idx] = 0.0f; } } //Ans = x + a*y __global__ void pcg_op(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (A[idx] == TYPEFLUID) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } __global__ void buildprecondition_pcg(farray P, charray mark, farray ans, farray input, int n) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<n) { ans[idx] = 1.0f / 6 * input[idx]; } } __global__ void copyParticle2GL_vel_k(float3* ppos, float3 *pvel, float *pmass, char *pflag, int pnum, float *renderpos, float *rendercolor) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; if (pflag[idx] == TYPEFLUID) { rendercolor[idx * 3] = 1.0f; rendercolor[idx * 3 + 1] = 0.0f; rendercolor[idx * 3 + 2] = 0.0f; } else if (pflag[idx] == TYPEAIR) { rendercolor[idx * 3] = 0.0f; rendercolor[idx * 3 + 1] = 0.0f; rendercolor[idx * 3 + 2] = 1.0f; } else if (pflag[idx] == TYPESOLID) { rendercolor[idx * 3] = 0.0f; rendercolor[idx * 3 + 1] = 1.0f; rendercolor[idx * 3 + 2] = 0.0f; } } } __global__ void copyParticle2GL_radius_k(float3* ppos, float *pmass, char *pflag, int pnum, float *renderpos, float *rendercolor, float minmass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; minmass *= 1.2f; //trick float rate = (pmass[idx] - minmass*dparam.m0) / (dparam.m0 - minmass*dparam.m0); rate = fmax(0.0f, fmin(1.0f, rate)); { float3 color = mapColorBlue2Red(powf(rate, 1.0f / 3)*6.0f); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; } } } __device__ inline void atomicaddfloat3(float3 *a, int idx, float3 b) { atomicAdd(&a[idx].x, b.x); atomicAdd(&a[idx].y, b.y); atomicAdd(&a[idx].z, b.z); } __global__ void smooth_computedisplacement(float3 *displacement, int *weight, float3 *ppos, uint *indices, int trianglenum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<trianglenum) { uint p1 = indices[idx * 3]; uint p2 = indices[idx * 3 + 1]; uint p3 = indices[idx * 3 + 2]; atomicaddfloat3(displacement, p1, ppos[p2] - ppos[p1]); atomicaddfloat3(displacement, p1, ppos[p3] - ppos[p1]); atomicaddfloat3(displacement, p2, ppos[p1] - ppos[p2]); atomicaddfloat3(displacement, p2, ppos[p3] - ppos[p2]); atomicaddfloat3(displacement, p3, ppos[p1] - ppos[p3]); atomicaddfloat3(displacement, p3, ppos[p2] - ppos[p3]); atomicAdd(&weight[p1], 2); atomicAdd(&weight[p2], 2); atomicAdd(&weight[p3], 2); } } __global__ void smooth_addDisplacement(float3 *displacement, int *weight, float3 *ppos, int vertexnum, float param) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<vertexnum) { if (weight[idx]>0) ppos[idx] += param * displacement[idx] / weight[idx]; displacement[idx] = make_float3(0.0f); weight[idx] = 0; } } //diffuse density field. __global__ void diffuse_dense(farray outp, farray inp, charray mark, float alpha, float beta) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < outp.xn * outp.yn * outp.zn) { float resp = 0; float p1, p2, p3, p4, p5, p6; float p0 = inp[idx]; int i, j, k; getijk(i, j, k, idx, outp.xn, outp.yn, outp.zn); if (mark(i, j, k) == TYPEBOUNDARY) outp[idx] = 0.0f; else { p1 = (mark(i + 1, j, k) == TYPEBOUNDARY) ? 
p0 : inp(i + 1, j, k);
			p2 = (mark(i, j + 1, k) == TYPEBOUNDARY) ? p0 : inp(i, j + 1, k);
			p3 = (mark(i, j, k + 1) == TYPEBOUNDARY) ? p0 : inp(i, j, k + 1);
			p4 = (mark(i - 1, j, k) == TYPEBOUNDARY) ? p0 : inp(i - 1, j, k);
			p5 = (mark(i, j - 1, k) == TYPEBOUNDARY) ? p0 : inp(i, j - 1, k);
			p6 = (mark(i, j, k - 1) == TYPEBOUNDARY) ? p0 : inp(i, j, k - 1);
			resp = (p1 + p2 + p3 + p4 + p5 + p6 + alpha*p0) / beta;
			outp[idx] = resp;
		}
	}
}

//diffuse velocity field.
__global__ void diffuse_velocity(farray outv, farray inv, float alpha, float beta)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx < outv.xn * outv.yn * outv.zn)
	{
		float resp = 0;
		float p1, p2, p3, p4, p5, p6;
		float p0 = inv[idx];
		int i, j, k;
		getijk(i, j, k, idx, outv.xn, outv.yn, outv.zn);
		if (i == 0 || j == 0 || k == 0 || i >= outv.xn - 1 || j >= outv.yn - 1 || k >= outv.zn - 1)
			outv[idx] = p0;
		else
		{
			p1 = inv(i + 1, j, k);
			p2 = inv(i, j + 1, k);
			p3 = inv(i, j, k + 1);
			p4 = inv(i - 1, j, k);
			p5 = inv(i, j - 1, k);
			p6 = inv(i, j, k - 1);
			resp = (p1 + p2 + p3 + p4 + p5 + p6 + alpha*p0) / beta;
			outv[idx] = resp;
		}
	}
}

//build an axis-aligned bounding box (AABB) per triangle face: the largest AABB extent within each block is
//reduced into maxLength, and the AABB centers are written to hashPoints for the spatial (block) hash.
__global__ void createAABB_q(float3* points, int nPoints, uint3* faces, int nFaces, float *maxLength, float3* hashPoints)
{
	int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (index >= nFaces)
		return;
	__shared__ float maxArray[256];

	uint p1 = faces[index].x;
	uint p2 = faces[index].y;
	uint p3 = faces[index].z;
	//the three triangle vertices
	float3 px = points[p1];
	float3 py = points[p2];
	float3 pz = points[p3];
	AABB aabb;
	aabb.xMin = (px.x>py.x) ? py.x : px.x;
	aabb.xMin = (aabb.xMin>pz.x) ? pz.x : aabb.xMin;
	aabb.xMax = (px.x<py.x) ? py.x : px.x;
	aabb.xMax = (aabb.xMax<pz.x) ? pz.x : aabb.xMax;
	aabb.yMin = (px.y>py.y) ? py.y : px.y;
	aabb.yMin = (aabb.yMin>pz.y) ? pz.y : aabb.yMin;
	aabb.yMax = (px.y<py.y) ? py.y : px.y;
	aabb.yMax = (aabb.yMax<pz.y) ? pz.y : aabb.yMax;
	aabb.zMin = (px.z>py.z) ? py.z : px.z;
	aabb.zMin = (aabb.zMin>pz.z) ? pz.z : aabb.zMin;
	aabb.zMax = (px.z<py.z) ? py.z : px.z;
	aabb.zMax = (aabb.zMax<pz.z) ? pz.z : aabb.zMax;

	float tempMaxLength = aabb.xMax - aabb.xMin;
	tempMaxLength = (tempMaxLength>aabb.yMax - aabb.yMin) ? (tempMaxLength) : (aabb.yMax - aabb.yMin);
	tempMaxLength = (tempMaxLength>aabb.zMax - aabb.zMin) ?
(tempMaxLength) : (aabb.zMax - aabb.zMin); maxArray[threadIdx.x] = tempMaxLength; hashPoints[index] = make_float3((aabb.xMin + aabb.xMax) / 2, (aabb.yMin + aabb.yMax) / 2, (aabb.zMin + aabb.zMax) / 2); __syncthreads(); for (int i = blockDim.x / 2; i>0; i /= 2) { if (threadIdx.x < i) maxArray[threadIdx.x] = max(maxArray[threadIdx.x], maxArray[i + threadIdx.x]); __syncthreads(); } if (threadIdx.x == 0) maxLength[blockIdx.x] = maxArray[0]; } __global__ void calcHash_radix_q( uint2* gridParticleIndex, // output float3* posArray, // input: positions uint numParticles, float3 t_min, float3 t_max) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 pos = posArray[index]; uint hash; int gz = (pos.z - t_min.z) / dparam.triHashSize.z; int gy = (pos.y - t_min.y) / dparam.triHashSize.y; int gx = (pos.x - t_min.x) / dparam.triHashSize.x; if (gx < 0 || gx > dparam.triHashRes.x - 1 || gy < 0 || gy > dparam.triHashRes.y - 1 || gz < 0 || gz > dparam.triHashRes.z - 1) hash = CELL_UNDEF; else hash = __mul24(__mul24(gz, (int)dparam.triHashRes.y) + gy, (int)dparam.triHashRes.x) + gx; // store grid hash and particle index gridParticleIndex[index] = make_uint2(hash, index); } // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStart_radix_q(uint* cellStart, // output: cell start index uint* cellEnd, // output: cell end index uint3* sortedFaces, uint2 * gridParticleHash, // input: sorted grid hashes uint3* oldFaces, uint numParticles) { extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index].x; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index - 1].x; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleHash[index].y; sortedFaces[index] = oldFaces[sortedIndex]; // see particles_kernel.cuh } } __global__ void calculateNormal(float3* points, uint3* faces, float3* normals, int num) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index < num) { uint3 face = faces[index]; float3 v1 = points[face.x]; float3 v2 = points[face.y]; float3 v3 = points[face.z]; float3 tmp; tmp.x = (v1.y - v2.y)*(v1.z - v3.z) - (v1.z - v2.z)*(v1.y - v3.y); tmp.y = (v1.z - v2.z)*(v1.x - v3.x) - (v1.x - v2.x)*(v1.z - v3.z); tmp.z = (v1.x - v2.x)*(v1.y - v3.y) - (v1.y - v2.y)*(v1.x - v3.x); normals[index] = normalize(tmp); } } //temp_yanglp: __device__ float IntersectTriangle_q(float3& pos, float radius, float3& v0, float3& v1, float3& v2, float3 n) { //compute the distance of pos and triangle plane float d = dot(pos - v0, n); if (abs(d)>radius) return -1; float dislimit = radius*radius - d*d; // float3 pTri = pos - d*n; float3 tempcross; float d0 = dot(pTri - v0, pTri - v0); float d1 = dot(pTri - v1, pTri - v1); float d2 = dot(pTri - v2, pTri - v2); // int tt = (dot(cross(pTri - v0, v1 - v0), n)>0) ? 1 : 0; tt += (dot(cross(pTri - v1, v2 - v1), n)>0) ? 2 : 0; tt += (dot(cross(pTri - v2, v0 - v2), n)>0) ? 4 : 0; //cuPrintf("tt=%d\n",tt); if (tt == 7 || tt == 0) { return abs(d); } // float distemp; float dis = (d0<dislimit) ? (d0) : dislimit; //dis dis = (d1<dis) ? (d1) : dis; dis = (d2<dis) ? (d2) : dis; // if (dot(v1 - v0, pTri - v0)*dot(v0 - v1, pTri - v1)>0) { tempcross = cross(v1 - v0, pTri - v0); distemp = dot(tempcross, tempcross) / dot(v1 - v0, v1 - v0); dis = (distemp<dis) ? (distemp) : dis; } if (dot(v2 - v1, pTri - v1)*dot(v1 - v2, pTri - v2)>0) { tempcross = cross(v2 - v1, pTri - v1); distemp = dot(tempcross, tempcross) / dot(v2 - v1, v2 - v1); dis = (distemp<dis) ? (distemp) : dis; } if (dot(v0 - v2, pTri - v2)*dot(v2 - v0, pTri - v0)>0) { tempcross = cross(v0 - v2, pTri - v2); distemp = dot(tempcross, tempcross) / dot(v0 - v2, v0 - v2); dis = (distemp<dis) ? 
(distemp) : dis; } if (dis > dislimit - 0.001) return -1; return sqrt(dis + d*d); } // calculate address in grid from position (clamping to edges) __device__ uint calcGridHash_q(int3 gridPos) { return __umul24(__umul24(gridPos.z, dparam.triHashRes.y), dparam.triHashRes.x) + __umul24(gridPos.y, dparam.triHashRes.x) + gridPos.x; } // collide a particle against all other particles in a given cell __device__ float3 collideCell(int3 gridPos, float3 pos, float radius, float3* surPoints, uint3* surIndex, float3* surfaceNor, uint* cellStart, uint* cellEnd, int scene) { uint gridHash = calcGridHash_q(gridPos); float dis_n, wib = 0; float3 force = make_float3(0.0f); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != CELL_UNDEF) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j<endIndex; j++) { //cuPrintf("j=%d\n", j); dis_n = IntersectTriangle_q(pos, radius, surPoints[surIndex[j].x], surPoints[surIndex[j].y], surPoints[surIndex[j].z], surfaceNor[j]); wib = 1 - dis_n / radius; if (dis_n >= 0 && wib > 0.00001) { force += (radius - dis_n) * (surfaceNor[j]) * 10; } } } return force; } __device__ void mindis_cell(float& mindisair, float& mindisfluid, float3 gpos, float3 *pos, char *parflag, float *pmass, uint *gridstart, uint *gridend, int gidx, float radius) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis; for (uint p = start; p<end; ++p) { dis = length(pos[p] - gpos);// // dis = fabs(length(pos[p] - gpos))- radius;// mantaflow if (parflag[p] == TYPEAIR || parflag[p] == TYPEAIRSOLO)//todo: SOLOls mindisair = (dis<mindisair) ? dis : mindisair; else if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) mindisfluid = (dis<mindisfluid) ? 
dis : mindisfluid; } } //level set //[2012]MultiFLIP for Energetic Two-Phase Fluid Simulation __global__ void genlevelset(farray lsfluid, farray lsair, charray mark, float3 *pos, char *parflag, float *pmass, uint *gridstart, uint *gridend, float fMCDensity, float offset) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) // { //float ls; float h = dparam.cellsize.x; mark[idx] = TYPEVACUUM; float r = 0.5f*h; //0.36f*h; //float r = 0.5*sqrt(3.)*1.01*2.5; //0.5*1.01 mantaflow //get position int i, j, k; getijk(i, j, k, idx, NX, NY, NZ); float3 gpos = (make_float3(i, j, k) + make_float3(0.5f, 0.5f, 0.5f))*dparam.cellsize.x; // shifted by half cell float mindisair = 2.5f*h, mindisfluid = 2.5f*h; //2.5 cellsize //float mindisair = r, mindisfluid = r; // mindis- r mantaflow int level = 2; for (int di = -level; di <= level; ++di) for (int dj = -level; dj <= level; ++dj) for (int dk = -level; dk <= level; ++dk) //27 { if (verifycellidx(i + di, j + dj, k + dk)) { mindis_cell(mindisair, mindisfluid, gpos, pos, parflag, pmass, gridstart, gridend, getidx(i + di, j + dj, k + dk), r); } } mindisair -= r; mindisfluid -= r; lsfluid[idx] = mindisfluid; // lsair[idx] = mindisair - offset*h; //todo: lscorrectposmarkgridmark lsair[idx] = mindisair; } } __device__ void sumcell_fluidSolid(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_fluidSolid(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -1; di <= 0; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 0; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 0; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? 
(usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } __device__ void sumcell_air(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_air(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; int rangemax = 2, rangemin = 1; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemin; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemin; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemin; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? (usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } __device__ void sumcell_solid(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPESOLID) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. 
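			// dis2 is the squared particle-to-node distance measured in grid-cell units (gpos is given in cell
			// coordinates, and scale converts world-space positions to cells); sharp_kernel(dis2, RE) then weights
			// this solid particle's velocity contribution within the support radius RE (1.4 cells).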
w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_solid(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; int rangemax = 2, rangemin = 1; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemin; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemin; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemin; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? 
(usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } // __global__ void cptdivergence_bubble(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls, farray sf) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float jx0, jx1, jy0, jy1, jz0, jz1, J; //surface tension, [2005]Discontinuous Fluids float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k), jx1 = 0; else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k), jx1 = 0; else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * waterux(i + 1, j, k) + (1 - theta) * airux(i + 1, j, k); jx1 = theta * sf(i, j, k) + (1 - theta) * sf(i + 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * airux(i + 1, j, k) + (1 - theta) * waterux(i + 1, j, k); jx1 = theta * sf(i, j, k) + (1 - theta) * sf(i + 1, j, k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k), jx0 = 0; else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k), jx0 = 0; else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * waterux(i, j, k) + (1 - theta) * airux(i, j, k); jx0 = theta*sf(i, j, k) + (1 - theta)*sf(i - 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * airux(i, j, k) + (1 - theta) * waterux(i, j, k); jx0 = theta*sf(i, j, k) + (1 - theta)*sf(i - 1, j, k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k), jy1 = 0; else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k), jy1 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * wateruy(i, j + 1, k) + (1 - theta) * airuy(i, j + 1, k); jy1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j + 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * airuy(i, j + 1, k) + (1 - theta) * wateruy(i, j + 1, k); jy1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j + 1, k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k), jy0 = 0; else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k), jy0 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * wateruy(i, j, k) + (1 - theta) * airuy(i, j, k); jy0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j - 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * airuy(i, j, k) + (1 - theta) * wateruy(i, j, k); jy0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j - 1, k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1), jz1 = 0; else if (mark[idx] == TYPEAIR 
&& mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1), jz1 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * wateruz(i, j, k + 1) + (1 - theta) * airuz(i, j, k + 1); jz1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k + 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * airuz(i, j, k + 1) + (1 - theta) * wateruz(i, j, k + 1); jz1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k + 1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k), jz0 = 0; else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k), jz0 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * wateruz(i, j, k) + (1 - theta) * airuz(i, j, k); jz0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k - 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * airuz(i, j, k) + (1 - theta) * wateruz(i, j, k); jz0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k - 1); } J = (jx1 - jx0 + jy1 - jy0 + jz1 - jz0) / h / h; div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; div += J; //surfacetension } outdiv[idx] = div; } } // __global__ void cptdivergence_bubble2(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k); else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k); else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * waterux(i + 1, j, k) + (1 - theta) * airux(i + 1, j, k); //ux1 = airux(i+1,j,k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * airux(i + 1, j, k) + (1 - theta) * waterux(i + 1, j, k); //ux1 = airux(i+1,j,k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k); else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * waterux(i, j, k) + (1 - theta) * airux(i, j, k); //ux0 = airux(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * airux(i, j, k) + (1 - theta) * waterux(i, j, k); //ux0 = airux(i,j,k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k); else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k); else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * wateruy(i, j + 1, k) + (1 - theta) * airuy(i, j + 1, k); //uy1 = airuy(i,j+1,k); } else if (mark[idx] == 
TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * airuy(i, j + 1, k) + (1 - theta) * wateruy(i, j + 1, k); //uy1 = airuy(i,j+1,k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * wateruy(i, j, k) + (1 - theta) * airuy(i, j, k); // uy0 = airuy(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * airuy(i, j, k) + (1 - theta) * wateruy(i, j, k); //uy0 = airuy(i,j,k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1); else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1); else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * wateruz(i, j, k + 1) + (1 - theta) * airuz(i, j, k + 1); //uz1 = airuz(i,j,k+1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * airuz(i, j, k + 1) + (1 - theta) * wateruz(i, j, k + 1); //uz1 = airuz(i,j,k+1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * wateruz(i, j, k) + (1 - theta) * airuz(i, j, k); //uz0 = airuz(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * airuz(i, j, k) + (1 - theta) * wateruz(i, j, k); //uz0 = airuz(i,j,k); } div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; } outdiv[idx] = div; } } __global__ void cptdivergence_bubble3(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k); else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k); else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i+1,j,k)-ls(i,j,k)); // ux1 = theta * waterux(i+1,j,k) + (1-theta) * airux(i+1,j,k); ux1 = airux(i + 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i+1,j,k)-ls(i,j,k)); // ux1 = theta * airux(i+1,j,k) + (1-theta) * waterux(i+1,j,k); ux1 = airux(i + 1, j, k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k); else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i-1,j,k)-ls(i,j,k)); // ux0 = theta * waterux(i,j,k) + (1-theta) * airux(i,j,k); ux0 = airux(i, 
j, k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i-1,j,k)-ls(i,j,k)); // ux0 = theta * airux(i,j,k) + (1-theta) * waterux(i,j,k); ux0 = airux(i, j, k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k); else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k); else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j+1,k)-ls(i,j,k)); // uy1 = theta * wateruy(i,j+1,k) + (1-theta) * airuy(i,j+1,k); uy1 = airuy(i, j + 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j+1,k)-ls(i,j,k)); // uy1 = theta * airuy(i,j+1,k) + (1-theta) * wateruy(i,j+1,k); uy1 = airuy(i, j + 1, k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j-1,k)-ls(i,j,k)); // uy0 = theta * wateruy(i,j,k) + (1-theta) * airuy(i,j,k); uy0 = airuy(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j-1,k)-ls(i,j,k)); // uy0 = theta * airuy(i,j,k) + (1-theta) * wateruy(i,j,k); uy0 = airuy(i, j, k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1); else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1); else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j,k+1)-ls(i,j,k)); // uz1 = theta * wateruz(i,j,k+1) + (1-theta) * airuz(i,j,k+1); uz1 = airuz(i, j, k + 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j,k+1)-ls(i,j,k)); // uz1 = theta * airuz(i,j,k+1) + (1-theta) * wateruz(i,j,k+1); uz1 = airuz(i, j, k + 1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { // theta=(0.0f-ls(i,j,k))/(ls(i,j,k-1)-ls(i,j,k)); // uz0 = theta * wateruz(i,j,k) + (1-theta) * airuz(i,j,k); uz0 = airuz(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { // theta=(0.0f-ls(i,j,k))/(ls(i,j,k-1)-ls(i,j,k)); // uz0 = theta * airuz(i,j,k) + (1-theta) * wateruz(i,j,k); uz0 = airuz(i, j, k); } div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; } outdiv[idx] = div; } } // __global__ void subGradPress_bubble(farray p, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; if (idx<dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k)) / h; } if (idx<dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k)) / h; } if (idx<dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1)) / h; } } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. 
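//For every FLUID or AIR cell the 7-point Laplacian stencil is applied: ans = (sum of the 6 neighbour values - 6*x)/h^2,
//where a BOUNDARY neighbour mirrors the center value (zero normal derivative); all other cells get ans = 0.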
__global__ void computeAx_bubble(farray ans, charray mark, farray x, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = -6.0f*center; float h2_rev = dparam.cellsize.x*dparam.cellsize.x; sum += (mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k); sum += (mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k); sum += (mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1); sum += (mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k); sum += (mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k); sum += (mark(i, j, k - 1) == TYPEBOUNDARY) ? center : x(i, j, k - 1); ans[idx] = sum / h2_rev; } else ans[idx] = 0.0f; } } //Ans = x + a*y __global__ void pcg_op_bubble(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (A[idx] == TYPEFLUID || A[idx] == TYPEAIR) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } //(TYPEFLUID)AIR(AIRSOLO)CIP. __global__ void advectparticle_RK2_bubble(float3 *ppos, float3 *pvel, int pnum, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEAIRSOLO) //AIRSOLO return; //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.cellsize.x)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.cellsize.x)); char partype = parflag[idx]; //pos-->grid xyz float3 gvel = make_float3(0.0f); if (partype == TYPEFLUID) gvel = getParticleVelFromGrid(ipos, waterux, wateruy, wateruz); else if (partype == TYPEAIR) gvel = getParticleVelFromGrid(ipos, airux, airuy, airuz); else //TYPEAIRSOLO return; if (velmode == CIP /*|| partype==TYPEAIR*/) //todo: cip ivel = gvel; else ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //mid point: x(n+1/2) = x(n) + 0.5*dt*u(xn) float3 midpoint = ipos + gvel * dt * 0.5; float3 gvelmidpoint; if (partype == TYPEFLUID) gvelmidpoint = getParticleVelFromGrid(midpoint, waterux, wateruy, wateruz); else gvelmidpoint = getParticleVelFromGrid(midpoint, airux, airuy, airuz); // x(n+1) = x(n) + dt*u(x+1/2) ipos += gvelmidpoint * dt; //check boundary if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back: TYPEAIR+TYPESOLIDTYPESOLOreturnTYPEFLUID pvel[idx] = ivel; // if( partype==TYPEFLUID ) // ppos[idx] = ipos; } } __global__ void mapvelg2p_flip_bubble(float3 *ppos, float3 *vel, char* parflag, int pnum, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float3 gvel = make_float3(0.0f); if (parflag[idx] == TYPEFLUID || parflag[idx] == TYPESOLID) gvel = getParticleVelFromGrid(ipos, waterux, wateruy, wateruz); else if (parflag[idx] == TYPEAIR) gvel = getParticleVelFromGrid(ipos, airux, airuy, airuz); vel[idx] += gvel; } } __global__ void compsurfacetension_k(farray sf, charray mark, farray phigrax, 
farray phigray, farray phigraz, float sigma) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] != TYPEBOUNDARY) { int i, j, k; getijk(i, j, k, idx); float len, h = dparam.cellsize.x; float res, grax1, gray1, graz1, grax0, gray0, graz0; float3 phigracenter = make_float3(phigrax[idx], phigray[idx], phigraz[idx]); len = length(phigracenter); if (len == 0) res = 0; else { phigracenter /= len; if (verifycellidx(i + 1, j, k)) { len = length(make_float3(phigrax(i + 1, j, k), phigray(i + 1, j, k), phigraz(i + 1, j, k))); if (len == 0) grax1 = phigracenter.x; else grax1 = phigrax(i + 1, j, k) / len; } else grax1 = phigracenter.x; if (verifycellidx(i - 1, j, k)) { len = length(make_float3(phigrax(i - 1, j, k), phigray(i - 1, j, k), phigraz(i - 1, j, k))); if (len == 0) grax0 = phigracenter.x; else grax0 = phigrax(i - 1, j, k) / len; } else grax0 = phigracenter.x; if (verifycellidx(i, j + 1, k)) { len = length(make_float3(phigrax(i, j + 1, k), phigray(i, j + 1, k), phigraz(i, j + 1, k))); if (len == 0) gray1 = phigracenter.y; else gray1 = phigray(i, j + 1, k) / len; } else gray1 = phigracenter.y; if (verifycellidx(i, j - 1, k)) { len = length(make_float3(phigrax(i, j - 1, k), phigray(i, j - 1, k), phigraz(i, j - 1, k))); if (len == 0) gray0 = phigracenter.y; else gray0 = phigray(i, j - 1, k) / len; } else gray0 = phigracenter.y; if (verifycellidx(i, j, k + 1)) { len = length(make_float3(phigrax(i, j, k + 1), phigray(i, j, k + 1), phigraz(i, j, k + 1))); if (len == 0) graz1 = phigracenter.z; else graz1 = phigraz(i, j, k + 1) / len; } else graz1 = phigracenter.z; if (verifycellidx(i, j, k - 1)) { len = length(make_float3(phigrax(i, j, k - 1), phigray(i, j, k - 1), phigraz(i, j, k - 1))); if (len == 0) graz0 = phigracenter.z; else graz0 = phigraz(i, j, k - 1) / len; } else graz0 = phigracenter.z; res = (grax1 - grax0 + gray1 - gray0 + graz1 - graz0) / h * 0.5f; //res = (grax1-phigracenter.x + gray1-phigracenter.y + graz1-phigracenter.z) / h ; } sf[idx] = res*sigma; } else sf[idx] = 0; } } __global__ void enforcesurfacetension_p(float3* ppos, float3 *pvel, char *pflag, int pnum, farray lsmerge, farray sf, farray phigrax, farray phigray, farray phigraz, charray mark, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID/* || pflag[idx]==TYPEAIRSOLO*/ || pflag[idx] == TYPEFLUID) return; if( (scene != SCENE_MELTANDBOIL&&scene != SCENE_MELTANDBOIL_HIGHRES && pflag[idx] == TYPEAIRSOLO) || ((scene != SCENE_ALL && pflag[idx] == TYPEAIRSOLO))) return; //1. compute the cell, and get the ls, get sf. float3 ipos = ppos[idx]; float ilsmerge = getScaleFromFrid(ipos, lsmerge); float isf = getScaleFromFrid(ipos, sf); float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); float lendir = length(dir); if (lendir == 0) return; float3 f; dir /= lendir; ilsmerge /= lendir; // int i, j, k; getijkfrompos(i, j, k, ipos); int cnt = (mark(i, j, k) == TYPEAIR) ? 1 : 0; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) if (verifycellidx(i + di, j + dj, k + dk)) if (mark(i + di, j + dj, k + dk) == TYPEAIR) cnt++; if (cnt == 0) return; // if(abs(ls_p)<threshold), enforce a surface tension force, change the velocity. 
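		// here the threshold is one cell width; the impulse f = -sf*dir (dir being the normalized merged
		// level-set gradient sampled at the particle) is added to the particle velocity as f*dt.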
if (abs(ilsmerge)<dparam.cellsize.x) { f = -isf*dir; pvel[idx] += f*dparam.dt; } } } //levelset __global__ void markLS_bigpositive(farray ls, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = ls[idx] / dparam.cellsize.x; if (ls[idx] >1.99f) { ls[idx] = 5.0f; mark[idx] = TYPEAIR; //sweep } else mark[idx] = TYPEFLUID; } } __global__ void setLSback_bigpositive(farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = ls[idx] * dparam.cellsize.x; } } __global__ void preparels(farray ls, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = -ls[idx] / dparam.cellsize.x; if (ls[idx] >0) { ls[idx] = 5.0f; mark[idx] = TYPEAIR; //sweep } else mark[idx] = TYPEFLUID; } } __global__ void setLSback(farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = -ls[idx] * dparam.cellsize.x; } } __global__ void mergeLSAndMarkGrid(farray lsmerge, charray mark, farray lsfluid, farray lsair) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx< dparam.gnum) { float h = dparam.cellsize.x; if (lsair[idx] >4.99f * h) { lsmerge[idx] = lsfluid[idx]; if (lsfluid[idx]>0) mark[idx] = TYPEVACUUM; else mark[idx] = TYPEFLUID; } else if (lsfluid[idx]>4.99f*h) { lsmerge[idx] = lsair[idx]; if (lsair[idx]>0) mark[idx] = TYPEVACUUM; else mark[idx] = TYPEAIR; } else if (lsair[idx]>0.8f*h && lsfluid[idx]>0.8f*h) { mark[idx] = TYPEVACUUM; lsmerge[idx] = min(lsfluid[idx], lsair[idx]); } else { lsmerge[idx] = (lsfluid[idx] - lsair[idx])*0.5f; if (lsmerge[idx]>0) mark[idx] = TYPEAIR; else mark[idx] = TYPEFLUID; } //todo: ls int i, j, k; getijk(i, j, k, idx); if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 || k == NZ - 1) mark[idx] = TYPEBOUNDARY, lsmerge[idx] = -0.5f*h; //todo: debug: //lsmerge[idx] = -lsmerge[idx]; } } __global__ void sweepu_k_bubble(farray outux, farray outuy, farray outuz, farray ux, farray uy, farray uz, farray ls, charray mark, char sweepflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; float wx, wy, wz, wsum; // if (idx < dparam.gvnum.x) { //copy outux[idx] = ux[idx]; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>1 && i<NX - 1 /*&& j>0 && j<N-1 && k>0 && k<N-1*/) { if ((mark(i, j, k) != sweepflag && mark(i - 1, j, k) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (j + dj<0 || j + dj>NY - 1 || k + dk<0 || k + dk >NZ -1) continue; wx = -di*(ls(i, j, k) - ls(i - 1, j, k)); if (wx<0) continue; wy = (ls(i, j, k) + ls(i - 1, j, k) - ls(i, j + dj, k) - ls(i - 1, j + dj, k))*0.5f; if (wy<0) continue; wz = (ls(i, j, k) + ls(i - 1, j, k) - ls(i, j, k + dk) - ls(i - 1, j, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outux(i, j, k) = wx*ux(i + di, j, k) + wy* ux(i, j + dj, k) + wz* ux(i, j, k + dk); } } } if (idx < dparam.gvnum.y) { //copy outuy[idx] = uy[idx]; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if ( /*i>0 && i<N-1 &&*/ j>1 && j<NY - 1 /*&& k>0 && k<N-1*/) { if ((mark(i, j, k) != sweepflag && mark(i, j - 1, k) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di>NX - 1 || k + dk<0 || k + dk >NZ - 1) continue; wy = -dj*(ls(i, j, k) - ls(i, j - 1, k)); if (wy<0) continue; 
wx = (ls(i, j, k) + ls(i, j - 1, k) - ls(i + di, j, k) - ls(i + di, j - 1, k))*0.5f; if (wx<0) continue; wz = (ls(i, j, k) + ls(i, j - 1, k) - ls(i, j, k + dk) - ls(i, j - 1, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuy(i, j, k) = wx*uy(i + di, j, k) + wy* uy(i, j + dj, k) + wz* uy(i, j, k + dk); } } } if (idx < dparam.gvnum.z) { //copy outuz[idx] = uz[idx]; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if ( /*i>0 && i<N-1 && j>0 && j<N-1 &&*/ k>1 && k<NZ - 1) { if ((mark(i, j, k) != sweepflag && mark(i, j, k - 1) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di >NX - 1 || j + dj<0 || j + dj>NY - 1) continue; wz = -dk*(ls(i, j, k) - ls(i, j, k - 1)); if (wz<0) continue; wy = (ls(i, j, k) + ls(i, j, k - 1) - ls(i, j + dj, k) - ls(i, j + dj, k - 1))*0.5f; if (wy<0) continue; wx = (ls(i, j, k) + ls(i, j, k - 1) - ls(i + di, j, k) - ls(i + di, j, k - 1))*0.5f; if (wx<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuz(i, j, k) = wx*uz(i + di, j, k) + wy* uz(i, j + dj, k) + wz* uz(i, j, k + dk); } } } } //"" __global__ void correctbubblepos(farray ls, farray phigrax, farray phigray, farray phigraz, float3 *ppos, char* pflag, int pnum, float *pphi) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; //test. todo. debug if (iflag == TYPEAIRSOLO || iflag == TYPESOLID) return; float3 ipos = ppos[idx]; int s = (iflag == TYPEFLUID) ? -1 : 1; float d, dirlen, rs = 0.5f*dparam.cellsize.x; float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, ls) / dirlen; //test // if( s*d<0 ) // ipos=ipos +rs*dir; //debug. pphi[idx] = d; //todo: if (s*d<0 && abs(d)<0.5f*dparam.cellsize.x) //wrong way { if (iflag == TYPEAIR&& abs(d)>0.3f*dparam.cellsize.x) // ipos = ipos - d*dir; else if (iflag == TYPEFLUID) { ipos = ipos - d*dir; dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, ls) / dirlen; ipos = ipos + s*(rs - s*d)*dir; } // cnt++; } else if (iflag == TYPEFLUID && s*d<rs*0.5f && s*d >= 0) //todo: rs*0.5f0.5 { ipos = ipos + s*(rs - s*d)*dir; } ppos[idx] = ipos; } } //"". //ls __global__ void correctbubblepos_air(farray lsmerge, farray phigrax, farray phigray, farray phigraz, farray lsair, farray phigrax_air, farray phigray_air, farray phigraz_air, float3 *ppos, char* pflag, int pnum, float *pphi) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; //test. todo. debug if (iflag == TYPEAIRSOLO || iflag == TYPESOLID) return; float3 ipos = ppos[idx]; int s = (iflag == TYPEFLUID) ? -1 : 1; float d, dirlen, rs = 0.5f*dparam.cellsize.x; float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, lsmerge) / dirlen; //test // if( s*d<0 ) // ipos=ipos +rs*dir; //debug. 
pphi[idx] = d; //todo: if (s*d<0 && abs(d)<0.5f*dparam.cellsize.x) //wrong way { if (iflag == TYPEAIR&& abs(d)>0.3f*dparam.cellsize.x) // ipos = ipos - d*dir; // cnt++; } if (iflag == TYPEFLUID) //level setlsmerge { dir = getVectorFromGrid(ipos, phigrax_air, phigray_air, phigraz_air); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, lsair) / dirlen; if (d<-1.3f*rs) ipos = ipos - (d - rs)*dir; } ppos[idx] = ipos; } } //levelset __global__ void computePhigra(farray phigrax, farray phigray, farray phigraz, farray ls) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float h = dparam.cellsize.x; float lsx1, lsx0, lsy1, lsy0, lsz1, lsz0, lscenter = ls[idx]; lsx1 = (verifycellidx(i + 1, j, k)) ? ls(i + 1, j, k) : lscenter; lsx0 = (verifycellidx(i - 1, j, k)) ? ls(i - 1, j, k) : lscenter; lsy1 = (verifycellidx(i, j + 1, k)) ? ls(i, j + 1, k) : lscenter; lsy0 = (verifycellidx(i, j - 1, k)) ? ls(i, j - 1, k) : lscenter; lsz1 = (verifycellidx(i, j, k + 1)) ? ls(i, j, k + 1) : lscenter; lsz0 = (verifycellidx(i, j, k - 1)) ? ls(i, j, k - 1) : lscenter; //todo: phigrax[idx] = ((lsx1 - lsx0)*0.5f) / h; phigray[idx] = ((lsy1 - lsy0)*0.5f) / h; phigraz[idx] = ((lsz1 - lsz0)*0.5f) / h; //phigrax[idx] = (lsx1-lscenter)/h; //phigray[idx] = (lsy1-lscenter)/h; //phigraz[idx] = (lsz1-lscenter)/h; } } __global__ void copyParticle2GL_phi(float3* ppos, char *pflag, float *pmass, float *pTemperature, int pnum, float *renderpos, float *rendercolor, farray ls, farray phigrax, farray phigray, farray phigraz, char typeflag, float Tmax, float Tmin) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //todo: if (pflag[idx] == typeflag/* || ppos[idx].y<NY*0.5f*dparam.cellsize.x */) { renderpos[idx * 3] = -2.0f; renderpos[idx * 3 + 1] = 0.0f; renderpos[idx * 3 + 2] = 0.0f; float3 color = make_float3(0.0f); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; return; } renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; float3 color; if (pflag[idx] == TYPEAIR) color = mapColorBlue2Red(0.0f); else if (pflag[idx] == TYPEFLUID) color = mapColorBlue2Red(2.0f); else if (pflag[idx] == TYPESOLID) color = mapColorBlue2Red(4.0f); else color = mapColorBlue2Red(6.0f); //color=mapColorBlue2Red( (pTemperature[idx]-Tmin)/(Tmax-Tmin)*6.0f ); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; } } //surface tension. 
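// Surface tension at the liquid/air interface is treated as a pressure jump in the ghost-fluid style
// of [Hong & Kim 2005, "Discontinuous Fluids"]: where a face straddles the interface, the jump J is
// interpolated from the sf grid at the zero level set, theta = (0 - ls(i-1)) / (ls(i) - ls(i-1)),
// and subtracted together with the pressure gradient, u -= (p(i) - p(i-1) - J) / h.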
[2005]Discontinuous Fluids __global__ void subGradPress_bubble(farray p, farray ux, farray uy, farray uz, farray sf, farray lsmerge, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; float J = 0.0f, theta; if (idx<dparam.gvnum.x) { J = 0.0f; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR)) { theta = (0.0f - lsmerge(i - 1, j, k)) / (lsmerge(i, j, k) - lsmerge(i - 1, j, k)); J = theta*sf(i - 1, j, k) + (1.0f - theta)*sf(i, j, k); } ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k) - J) / h; } } if (idx<dparam.gvnum.y) { J = 0.0f; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR)) { theta = (0.0f - lsmerge(i, j - 1, k)) / (lsmerge(i, j, k) - lsmerge(i, j - 1, k)); J = theta*sf(i, j - 1, k) + (1.0f - theta)*sf(i, j, k); } uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k) - J) / h; } } if (idx<dparam.gvnum.z) { J = 0.0f; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR)) { theta = (0.0f - lsmerge(i, j, k - 1)) / (lsmerge(i, j, k) - lsmerge(i, j, k - 1)); J = theta*sf(i, j, k - 1) + (1.0f - theta)*sf(i, j, k); } uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1) - J) / h; } } } __global__ void sweepVacuum(charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (mark[idx] != TYPEAIR) return; //mark for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) if (mark(i + di, j + dj, k + dk) == TYPEVACUUM) mark[idx] = TYPEVACUUM; } } __global__ void markDeleteAirParticle(float3* ppos, char* pflag, float *pmass, uint *preservemark, int pnum, charray mark, farray lsmerge, farray lsair, uint *cnt) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { //fluid and solid particles are preserved, air and airsolo particles are verified. 
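		// Deletion policy: solids are always kept; a fluid particle is dropped once the air level set
		// at its position is clearly negative (it has ended up inside an air pocket); an air/airsolo
		// particle is dropped if any cell in its 3x3x3 neighbourhood is vacuum or its mass is ~0.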
if (pflag[idx] == TYPESOLID) { preservemark[idx] = 1; return; } int i, j, k; getijkfrompos(i, j, k, ppos[idx]); if (pflag[idx] == TYPEFLUID) { float lsm = getScaleFromFrid(ppos[idx], lsmerge); float lsa = getScaleFromFrid(ppos[idx], lsair); if ( /*lsm>1.2f*dparam.cellsize.x || */lsa<-1.0*dparam.cellsize.x) preservemark[idx] = 0, cnt[0]++; else preservemark[idx] = 1; return; } int cnt = 0; for (int di = -1; di <= 1; di += 1) for (int dj = -1; dj <= 1; dj += 1) for (int dk = -1; dk <= 1; dk += 1) if (verifycellidx(i + di, j + dj, k + dk) && mark(i + di, j + dj, k + dk) == TYPEVACUUM) cnt++; if (cnt == 0 && pmass[idx]>0.000001f) //notice: preservemark[idx] = 1; else preservemark[idx] = 0; } } // compact voxel array __global__ void deleteparticles(uint *preserveflag, uint *preserveflagscan, int pnum, float3 *outpos, float3 *pos, float3 *outvel, float3 *vel, float *outmass, float* mass, char *outflag, char *flag, float *outTemperature, float *temperature, float *outheat, float *heat, float *outsolubility, float *solubility, float *outgascontain, float *gascontain) { uint idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (preserveflag[idx] == 1) { //deleteflagscan "". uint outidx = preserveflagscan[idx]; outpos[outidx] = pos[idx]; outvel[outidx] = vel[idx]; outmass[outidx] = mass[idx]; outflag[outidx] = flag[idx]; outTemperature[outidx] = temperature[idx]; outheat[outidx] = heat[idx]; outsolubility[outidx] = solubility[idx]; outgascontain[outidx] = gascontain[idx]; } } } __device__ int cntairparticle(float3 *ppos, char *pflag, int igrid, uint *gridstart, uint *gridend, const float3 &ipos, float r) { uint start = gridstart[igrid]; int res = 0; float dis; if (start == CELL_UNDEF) return res; for (int p = start; p<gridend[igrid]; p++) { dis = length(ppos[p] - ipos); if (dis<r && (pflag[p] == TYPEAIR || pflag[p] == TYPEAIRSOLO)) { ++res; } } return res; } __device__ inline bool isInBoundaryCell(int x, int y, int z) { int level = 2; if (x <= level || x >= NX - 1 - level || y <= level || y >= NY - 1 - level) return true; else return false; } __global__ void verifySoloAirParticle(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray lsmerge, farray airux, farray airuy, farray airuz, uint *gridstart, uint *gridend, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; if (iflag == TYPEFLUID || iflag == TYPESOLID) //TYPEAIR, TYPEAIRSOLO can go on. return; float3 ipos = ppos[idx]; float ls = getScaleFromFrid(ipos, lsmerge); float h = dparam.cellsize.x; int i, j, k; getijkfrompos(i, j, k, ipos); //a key adjustment, the tolerent will affect the result directly. int cnt = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) cnt += cntairparticle(ppos, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, h); float tol1 = -1.45f, tol2 = -0.5f; if (scene == SCENE_MELTANDBOIL || scene == SCENE_MELTANDBOIL_HIGHRES || scene==SCENE_ALL) tol1 = 0.05f, tol2 = -0.8f; else if (scene == SCENE_INTERACTION) tol1 = 0.2f, tol2 = -0.5f; if ((cnt >= 10 || ls>tol1*h) && pflag[idx] == TYPEAIRSOLO && !isInBoundaryCell(i, j, k)) //decide whether the air solo particle should be transfered to air particle. { if (cnt >= 3) pflag[idx] = TYPEAIR; } else if (iflag == TYPEAIR && (isInBoundaryCell(i, j, k) || ls<tol2*h || cnt <= 1)) { //todo: or not??? 
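			// An AIR particle is demoted back to AIRSOLO when it sits near the domain boundary, lies
			// deep inside the liquid (ls < tol2*h), or has lost (almost) all of its air neighbours;
			// its velocity is then re-sampled from the air grid so it keeps a coherent motion.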
//pvel[idx]= pvel[idx]*0.8f + 0.2f*getParticleVelFromGrid(ipos,airux,airuy,airuz); pvel[idx] = getParticleVelFromGrid(ipos, airux, airuy, airuz); pflag[idx] = TYPEAIRSOLO; } } } __device__ float sumdensity(float3 ipos, float h2, int grididx, float3 *ppos, char *pflag, uint *gridstart, uint *gridend) { float res = 0; uint start = gridstart[grididx]; if (start == CELL_UNDEF) return res; float dist2; for (uint p = start; p<gridend[grididx]; p++) { // notice: should include liquid particle, not just spray particle. if (pflag[p] != TYPEAIR && pflag[p] != TYPEAIRSOLO) continue; dist2 = dot(ppos[p] - ipos, ppos[p] - ipos); if (dist2<h2) res += pow(h2 - dist2, 3.0f); //todo: m0 or pmass[p]? } return res; } __global__ void calcDensPress_Air(float3* ppos, float *pdens, float *ppress, char* pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIR && pflag[idx] != TYPEAIRSOLO) return; float3 ipos = ppos[idx]; float h = dparam.cellsize.x; //todo: set support radius, key part. float h2 = h*h; int i, j, k; getijkfrompos(i, j, k, ipos); float dens = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) dens += sumdensity(ipos, h2, getidx(i + di, j + dj, k + dk), ppos, pflag, gridstart, gridend); dens *= dparam.airm0 * dparam.poly6kern; if (dens == 0) dens = 1.0f; pdens[idx] = 1.0f / dens; ppress[idx] = 1.5f * (dens - dparam.waterrho*0.5f); } } __device__ float3 sumforce(float3 *ppos, float3 *pvel, float *ppress, float *pdens, char *pflag, int grididx, uint *gridstart, uint *gridend, float3 ipos, float3 ivel, float ipress, float idens, float h, float kvis) { uint start = gridstart[grididx]; float3 res = make_float3(0.0f), dir; float dis, c, pterm, dterm;// kattrct=0.0f, if (start == CELL_UNDEF) return res; float vterm = dparam.lapkern * kvis; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<h && (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR)) { c = h - dis; pterm = -0.5f * c * dparam.spikykern * (ipress + ppress[p]) / dis; dterm = c * idens * pdens[p]; res += (pterm * dir + vterm * (pvel[p] - ivel)) * dterm; } } return res; } __global__ void enforceForceSoloAirP(float3 *ppos, float3 *pvel, float *pdens, float *ppress, char *pflag, int pnum, uint *gridstart, uint *gridend, float viscositySPH, float maxVelForBubble) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIRSOLO && pflag[idx] != TYPEAIR) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float ipress = ppress[idx], idens = pdens[idx]; float h = dparam.cellsize.x; //float kvis=0.0f; int i, j, k; float3 force = make_float3(0.0f); getijkfrompos(i, j, k, ipos); int width = 1; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) force += sumforce(ppos, pvel, ppress, pdens, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, ivel, ipress, idens, h, viscositySPH); //todo: ?? force *= dparam.airm0; //force = make_float3(0); ivel += force*dparam.dt; ipos += ivel*dparam.dt; //restrict the vel below a threshold. // if( length(ivel) > maxVelForBubble ) // ivel = normalize(ivel) * maxVelForBubble; // // advect particle, using rho!!!! 
// ppos[idx]=ipos; pvel[idx] = ivel; } } __device__ float sumdensity_SLCouple(float3 ipos, float h2, int grididx, float3 *ppos, char *pflag, uint *gridstart, uint *gridend) { float res = 0; uint start = gridstart[grididx]; if (start == CELL_UNDEF) return res; float dist2; for (uint p = start; p<gridend[grididx]; p++) { dist2 = dot(ppos[p] - ipos, ppos[p] - ipos); if (dist2<h2) res += pow(h2 - dist2, 3.0f); } return res; } //solid-liquid coupling, in SPH framework __global__ void calcDensPressSPH_SLCouple(float3* ppos, float *pdens, float *ppress, char* pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { float3 ipos = ppos[idx]; float h = dparam.cellsize.x; //todo: set support radius, key part. float h2 = h*h; int i, j, k; getijkfrompos(i, j, k, ipos); float dens = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) dens += sumdensity_SLCouple(ipos, h2, getidx(i + di, j + dj, k + dk), ppos, pflag, gridstart, gridend); dens *= dparam.m0 * dparam.poly6kern; if (dens == 0) dens = 1.0f; pdens[idx] = 1.0f / dens; ppress[idx] = 1.5f * (dens - dparam.waterrho); } } __device__ float3 sumforce_SLCouple(float3 *ppos, float3 *pvel, float *ppress, float *pdens, char *pflag, int grididx, uint *gridstart, uint *gridend, float3 ipos, float3 ivel, float ipress, float idens, float h, float kvis) { uint start = gridstart[grididx]; float3 res = make_float3(0.0f), dir; float dis, c, pterm, dterm;// kattrct=0.0f, kvis=0.0f; if (start == CELL_UNDEF) return res; float vterm = dparam.lapkern * kvis; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<h) { c = h - dis; pterm = -0.5f * c * dparam.spikykern * (ipress + ppress[p]) / dis; dterm = c * idens * pdens[p]; res += (pterm * dir + vterm * (pvel[p] - ivel)) * dterm; } } return res; } __global__ void enforceForceSPH_SLCouple(float3 *ppos, float3 *pvel, float *pdens, float *ppress, char *pflag, int pnum, uint *gridstart, uint *gridend, float viscositySPH) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEFLUID) //fluidsolid return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float ipress = ppress[idx], idens = pdens[idx]; float h = dparam.cellsize.x; //float kvis=0.0f; int i, j, k; float3 force = make_float3(0.0f); getijkfrompos(i, j, k, ipos); int width = 1; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) force += sumforce_SLCouple(ppos, pvel, ppress, pdens, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, ivel, ipress, idens, h, viscositySPH); // force=make_float3(0.0f); //todo: ?? //add gravity here? or external force part; force *= dparam.m0; //force = make_float3(0); ivel += force*dparam.dt; ipos += ivel*dparam.dt; // advect particle, using rho!!!! 
ppos[idx] = ipos; pvel[idx] = ivel; } } __global__ void updateFixedHeat(farray fixedHeat, int frame) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i >= NX / 4 && i<NX*0.75 && j >= NY / 4 && j<NY*0.75 && k <= 3 /*k<=20 && k>=19*/) fixedHeat[idx] = 273.0f + 100.0f * min(frame / 40.f, 1.0f); else fixedHeat[idx] = UNDEF_TEMPERATURE; } } __global__ void addHeatAtBottom(farray Tp, int frame, float heatIncreaseBottom) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i >= 1 && i<NX - 1 && j >= 1 && j<NY - 1 && k <= 3 /*k<=20 && k>=19*/) Tp[idx] += heatIncreaseBottom;//1.5f; //Tp[idx] = 350.0f;//273.0f + 100.0f * min(frame/40.f, 1.0f ); Tp[idx] = min(378.0f, Tp[idx]); } } // __global__ void compb_heat(farray Tp_old, farray Tp, farray fixedheat, charray mark, float *heatAlphaArray) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float alpha = heatAlphaArray[mark[idx]]; //tpbfixedheat // if( fixedheat[idx]!=UNDEF_TEMPERATURE ) // Tp[idx]=fixedheat[idx], Tp_old[idx] = fixedheat[idx]*dparam.cellsize.x*dparam.cellsize.x/alpha/dparam.dt; // else Tp_old[idx] = Tp[idx] * dparam.cellsize.x*dparam.cellsize.x / alpha / dparam.dt; } } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. __global__ void computeAx_heat(farray ans, charray mark, farray x, int n, float *heatAlphaArray, farray fixedHeat, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { float h = dparam.cellsize.x; float dt = dparam.dt; float alpha = heatAlphaArray[mark[idx]]; if (mark[idx] != TYPEBOUNDARY/* && mark[idx]!=TYPEVACUUM*/) { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = (h*h / alpha / dt + 6.0f)*center; //trick: freeair if (scene == SCENE_BOILING || scene == SCENE_BOILING_HIGHRES || scene == SCENE_MELTANDBOIL || scene == SCENE_MELTANDBOIL_HIGHRES || scene ==SCENE_ALL) { sum -= ((mark(i + 1, j, k) == TYPEBOUNDARY || mark(i + 1, j, k) == TYPEVACUUM) ? center : x(i + 1, j, k)); sum -= ((mark(i, j + 1, k) == TYPEBOUNDARY || mark(i, j + 1, k) == TYPEVACUUM) ? center : x(i, j + 1, k)); sum -= ((mark(i, j, k + 1) == TYPEBOUNDARY || mark(i, j, k + 1) == TYPEVACUUM) ? center : x(i, j, k + 1)); sum -= ((mark(i - 1, j, k) == TYPEBOUNDARY || mark(i - 1, j, k) == TYPEVACUUM) ? center : x(i - 1, j, k)); sum -= ((mark(i, j - 1, k) == TYPEBOUNDARY || mark(i, j - 1, k) == TYPEVACUUM) ? center : x(i, j - 1, k)); sum -= ((mark(i, j, k - 1) == TYPEBOUNDARY || mark(i, j, k - 1) == TYPEVACUUM) ? center : x(i, j, k - 1)); } else { sum -= ((mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k)); sum -= ((mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k)); sum -= ((mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1)); sum -= ((mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k)); sum -= ((mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k)); sum -= ((mark(i, j, k - 1) == TYPEBOUNDARY) ? 
center : x(i, j, k - 1)); } ans[idx] = sum; } } } //Ans = x + a*y __global__ void pcg_op_heat(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { // if( A[idx]==TYPEFLUID || A[idx]==TYPEAIR ) if (A[idx] != TYPEBOUNDARY) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } __global__ void setBoundaryHeat(farray tp) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == NX - 1) tp[idx] = tp(i - 1, j, k); else if (i == 0) tp[idx] = tp(i + 1, j, k); else if (j == NY - 1) tp[idx] = tp(i, j - 1, k); else if (j == 0) tp[idx] = tp(i, j + 1, k); else if (k == NZ - 1) tp[idx] = tp(i, j, k - 1); else if (k == 0) tp[idx] = tp(i, j, k + 1); } } __global__ void compTpChange(farray tp, farray tpsave, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] != TYPEBOUNDARY) tpsave[idx] = tp[idx] - tpsave[idx]; else tpsave[idx] = 0; } } __device__ void sumHeat(float &heatsum, float &weight, float3 gpos, float3 *pos, float *pTemperature, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = sharp_kernel(dis2, RE); weight += w; heatsum += w*pTemperature[p]; } } __global__ void mapHeatp2g_hash(float3 *ppos, float *pTemperature, int pnum, farray heat, uint* gridstart, uint *gridend, float defaulttemperature) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; float weight = 0.0f, heatsum = 0; float3 gpos; getijk(i, j, k, idx); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumHeat(heatsum, weight, gpos, ppos, pTemperature, gridstart, gridend, getidx(i + di, j + dj, k + dk)); heatsum = (weight>0) ? (heatsum / weight) : defaulttemperature; heat(i, j, k) = heatsum; } } __global__ void mapHeatg2p(float3 *ppos, char *parflag, float *pTemperature, int pnum, farray Tchange, farray T, float defaultSolidT, float alphaTempTrans) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; pTemperature[idx] = alphaTempTrans*(pTemperature[idx] + getScaleFromFrid(ipos, Tchange)) + (1 - alphaTempTrans)*getScaleFromFrid(ipos, T); //use a scheme like FLIP, update the particle temperature by heat change. } } __global__ void mapHeatg2p_MeltAndBoil(float3 *ppos, char *parflag, float *pTemperature, int pnum, farray Tchange, farray T, float defaultSolidT, float alphaTempTrans) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float newtemp = alphaTempTrans*(pTemperature[idx] + getScaleFromFrid(ipos, Tchange)) + (1 - alphaTempTrans)*getScaleFromFrid(ipos, T); //use a scheme like FLIP, update the particle temperature by heat change. 
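		// newtemp is a FLIP-style blend: alpha*(T_p + dT_grid) picks up the incremental grid change,
		// while (1-alpha)*T_grid is a direct PIC-style resample. Solid particles below keep 95% of
		// their previous temperature, presumably to keep the melting front stable.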
if (parflag[idx] == TYPESOLID) pTemperature[idx] = 0.95f*(pTemperature[idx]) + 0.05f*newtemp; else pTemperature[idx] = newtemp; } } __global__ void initHeatParticle(float *pTemperature, float *pHeat, float defaultSolidT, float defaultLiquidT, float LiquidHeatTh, char *pflag, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) { pTemperature[idx] = defaultSolidT; pHeat[idx] = 0; } else { pTemperature[idx] = defaultLiquidT; pHeat[idx] = LiquidHeatTh; } } } //Temperature0=273.15K, Solubility0=1.0f (1) __global__ void initsolubility_k(float *psolubility, float* pgascontain, float *ptemperature, char *pflag, int pnum, float Solubility0, float Temperature0, float dissolvegasrate, float initgasrate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPEFLUID || pflag[idx] == TYPESOLID) { psolubility[idx] = dissolvegasrate*dparam.airm0 * exp(1018.9f*(1 / ptemperature[idx] - 1 / Temperature0)); //todo: adjust the parameter. pgascontain[idx] = initgasrate*psolubility[idx]; } else { psolubility[idx] = 0; pgascontain[idx] = 0; } } } //Temperature0=273.15K, Solubility0=1.0f (1) __global__ void updatesolubility(float *psolubility, float *ptemperature, char *pflag, int pnum, float Solubility0, float Temperature0, float dissolvegasrate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPEFLUID) psolubility[idx] = dissolvegasrate*dparam.airm0 * exp(1018.9f*(1 / ptemperature[idx] - 1 / Temperature0)); //todo: adjust the parameter. } } //addparnums0 __global__ void GenerateGasParticle_k(float *psolubility, float *paircontain, float3 *ppos, float3 *pvel, float *pmass, char *pflag, float *pTemperature, float *pLHeat, int pnum, uint *gridstart, uint *gridend, int *addparnums, float *randfloat, int randcnts, int frame, farray gTemperature, float LiquidHeatTh, int *seedcell, int seednum, float vaporGenRate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { float gcontain = 0, gsolubility = 0, gairexist = 0; int liquidParCnt = 0, gasParCnt = 0; float airparticlemass0 = dparam.airm0; //todo float vaporsum = 0;//, vaporrate = 0.01f; float3 gaspos = make_float3(0), gasvel = make_float3(0); int i, j, k; getijk(i, j, k, idx); if (k <= 1 || isInBoundaryCell(i, j, k)) return; // float3 gpos = make_float3(i, j, k)*dparam.cellsize.x; uint start = gridstart[idx]; if (start == CELL_UNDEF) return; //1. 
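		// Step 1: gather, over the particles of this cell, the dissolved gas, the total solubility and
		// the vapour mass produced by latent heat above LiquidHeatTh, plus the average position and
		// velocity of any existing gas particles so a new bubble can be seeded next to them.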
for (int p = start; p<gridend[idx]; p++) { if (pflag[p] == TYPEFLUID) { gcontain += paircontain[p]; gsolubility += psolubility[p]; vaporsum += max(0.0f, pLHeat[p] - LiquidHeatTh) * vaporGenRate * airparticlemass0; liquidParCnt++; } else if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { gairexist += pmass[p]; gaspos += ppos[p]; gasvel += pvel[p]; gasParCnt++; } } bool hasseed = false; for (int i = 0; i<seednum; i++) if (seedcell[i] == idx) hasseed = true; // int addcnt = 0; int randbase = (idx*frame) % (randcnts - 200); //randpos and randfloat are in [0,1] float3 randpos = make_float3(randfloat[(randbase + addcnt++) % randcnts], randfloat[(randbase + addcnt++) % randcnts], randfloat[(randbase + addcnt++) % randcnts]); float randnum = randfloat[(randbase + addcnt++) % randcnts]; float r = dparam.cellsize.x * 0.25f; if (gcontain - gsolubility + vaporsum > airparticlemass0 && (hasseed || gasParCnt>0)) { int addindex = atomicAdd(&addparnums[0], 1) + pnum; pmass[addindex] = airparticlemass0;//dparam.m0; //todo: if (gasParCnt>0) { ppos[addindex] = gaspos / gasParCnt + (max(0.5f, randnum)*r) * (randpos - make_float3(0.5f)) * 2; // pvel[addindex] = make_float3(0.0f);//gasvel/gasParCnt; // } else { ppos[addindex] = gpos + dparam.cellsize.x*randpos; pvel[addindex] = make_float3(0.0f); } pflag[addindex] = TYPEAIRSOLO; pTemperature[addindex] = gTemperature[idx]; // pLHeat[addindex] = 0; //heat paircontain[addindex] = 0.0f; psolubility[addindex] = 0.0f; // for (int p = start; p<gridend[idx]; p++) { if (pflag[p] == TYPEFLUID) { paircontain[p] = min(paircontain[p], psolubility[p]); pLHeat[p] = min(pLHeat[p], LiquidHeatTh); //todo: decrease the liquids mass. } } } } } //addparnums0 __global__ void updatebubblemass(float *psolubility, float *paircontain, float3 *ppos, float *pmass, char *pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum / 8) //8 { float gcontain = 0, gsolubility = 0, gairexist = 0; int fpcnt = 0, apcnt = 0; float airparticlemass0 = dparam.airm0; //todo int i, j, k; getijk(i, j, k, idx, NX / 2, NY / 2, NZ / 2); i *= 2, j *= 2, k *= 2; // float3 gpos; int gidx; for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { gidx = getidx(i + di, j + dj, k + dk); // gpos=make_float3(i+di,j+dj,k+dk)*dparam.cellsize.x; if (gridstart[gidx] == CELL_UNDEF) continue; //1. for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (pflag[p] == TYPEFLUID) { gcontain += paircontain[p]; gsolubility += psolubility[p]; fpcnt++; } else if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { gairexist += pmass[p]; apcnt++; } } } //2. 
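		// Step 2: compare dissolved gas against solubility in this 2x2x2 block: excess gas inflates the
		// existing air particles (capped per particle and by a maximum bubble radius), while a deficit
		// deflates them and returns the mass to the fluid particles' dissolved-gas store.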
float maxradius = 1.5f*dparam.cellsize.x; float maxmass = getMassfromR(maxradius); float massaddlimit = 3.0f*dparam.airm0; //3 float addmass; if (gcontain>gsolubility) { //todo: if (abs(gcontain - gsolubility) < 2.5*airparticlemass0/*1.3f*gsolubility*/) // return; //2.1: float needadd = gcontain - gsolubility; if (apcnt>0) { for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (needadd <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; // gpos=make_float3(i+di,j+dj,k+dk)*dparam.cellsize.x; for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { addmass = min(massaddlimit, maxmass - pmass[p]); addmass = max(0.0f, min(needadd, addmass)); needadd -= addmass; // pmass[p] += addmass; if (needadd <= 0) break; } } } } //2.3: float actualadd = gcontain - gsolubility - needadd, eachchange; for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (actualadd <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (actualadd <= 0) break; if (pflag[p] == TYPEFLUID) { if (paircontain[p] - psolubility[p]>0) { eachchange = min(actualadd, paircontain[p] - psolubility[p]); paircontain[p] -= eachchange; actualadd -= eachchange; } } } } } //end if( gcontain>gsolubility ) else if (gairexist>0) //3: { //todo: if (abs(gcontain - gsolubility) < 3.6f*airparticlemass0/*1.3f*gsolubility*/) // return; //3.1: float needminus = gsolubility - gcontain; // float masschangesum = 0; // if (gairexist<needminus) needminus = gairexist; if (needminus>0)//minus some of them to 0 mass, use another kernel to delete it. { for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (needminus <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx] && needminus>0; p++) { if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { float masschange = min(pmass[p], needminus); // pmass[p] -= masschange; needminus -= masschange; masschangesum += masschange; } } } } //3.2: . change the fluid particls. for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (masschangesum <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx] && masschangesum>0; p++) { if (pflag[p] == TYPEFLUID) { float containchange = min(max(0.0f, psolubility[p] - paircontain[p]), masschangesum); // paircontain[p] += containchange; masschangesum -= containchange; } } } } } } //emptyAIR //markgrid, correctpos, heattransfer. 
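// The next kernel advances the "empty" seed bubbles attached to the container floor: each seed absorbs
// excess dissolved gas from fluid particles in a small neighbourhood and grows; once its radius reaches
// roughly one cell size it is released as a ball of TYPEAIR particles and the seed is reset.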
__global__ void updateEmptyBubbles(float3 *pepos, float3 *pedir, float *peradius, int penum,
	float3 *parpos, float3 *parvel, float *parmass, float* parTemperature, char *parflag, float *parsolubility, float *paraircontain, int parnum, int *addparnums,
	uint *gridstart, uint *gridend, farray gTemperature)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<penum)
	{
		int airRscale = 2;
		float3 ipos = pepos[idx], idir = pedir[idx];
		float iradius = peradius[idx];
		float rthresholdleave = 1.0f*dparam.cellsize.x;	//todo:
		float rthreshold = max(0.0f, iradius + 0.1f*dparam.cellsize.x);
		rthreshold = min(rthreshold, rthresholdleave);
		int i, j, k;
		getijkfrompos(i, j, k, ipos);
		float massorigin = dparam.waterrho * 4 / 3 * M_PI*(pow(iradius, 3))*0.5;
		float masscantake = dparam.waterrho * 4 / 3 * M_PI*(pow(rthreshold, 3) - pow(iradius, 3))*0.5, massadd = 0;	//todo
		int range = 2;
		for (int di = -range; di <= range &&masscantake>0; di++) for (int dj = -range; dj <= range&&masscantake>0; dj++) for (int dk = -range; dk <= range&&masscantake>0; dk++)
			if (verifycellidx(i + di, j + dj, k + dk))
			{
				int grididx = getidx(i + di, j + dj, k + dk);
				for (uint p = gridstart[grididx]; p<gridend[grididx] && masscantake>0; p++)	// collect excess dissolved gas from nearby fluid particles
				{
					if (parflag[p] != TYPEFLUID) continue;
					float gasreslease = max(0.0f, paraircontain[p] - parsolubility[p]);
					if (gasreslease <= 0) continue;
					gasreslease = min(gasreslease, masscantake);
					massadd += gasreslease;
					masscantake -= gasreslease;
					//paraircontain[p] -= gasreslease;
				}
			}
		float newiradius = pow((massadd + massorigin) / dparam.waterrho / 4 * 3 / M_PI, 1.0 / 3);
		ipos += (newiradius - iradius)*idir;
		float ss = dparam.samplespace;
		if (newiradius + 1e-5 >= rthresholdleave)	// release the bubble as a ball of air particles
		{
			int num = ceil(newiradius / ss);
			for (float x = -num*ss; x <= newiradius; x += ss)for (float y = -num*ss; y <= newiradius; y += ss)for (float z = -num*ss; z <= newiradius; z += ss)
			{
				if (x*x + y*y + z*z>newiradius*newiradius) continue;
				int addindex = atomicAdd(&addparnums[0], 1) + parnum;
				parmass[addindex] = dparam.airm0;	//todo:
				parpos[addindex] = ipos + make_float3(x, y, z);
				parflag[addindex] = TYPEAIR;
				parvel[addindex] = make_float3(0.0f);
				parTemperature[addindex] = gTemperature[getidx(i, j, 1)];	//todo:
				paraircontain[addindex] = 0.0f;
				parsolubility[addindex] = 0.0f;
			}
			ipos.z = 1.1f*dparam.cellsize.x;	// reset the seed to just above the floor
			newiradius = 0;
		}
		peradius[idx] = newiradius;
		pepos[idx] = ipos;
	}
}

__device__ void mat4_mul(matrix4* dst, const matrix4* m0, const matrix4* m1)
{
	// note: accumulates into dst, which is expected to be zero-initialized by the caller.
	int row; int col; int i;
	for (row = 0; row < 4; row++) for (col = 0; col < 4; col++) for (i = 0; i < 4; i++)
		dst->m[row * 4 + col] += m0->m[row * 4 + i] * m1->m[i * 4 + col];
}

__device__ void mat4_mulvec3_as_mat3(float3* dst, const matrix4* m, const float3* v)
{
	float new_x; float new_y; float new_z;
	new_x = v->x*m->m[0 + 4 * 0] + v->y*m->m[0 + 4 * 1] + v->z*m->m[0 + 4 * 2];
	new_y = v->x*m->m[1 + 4 * 0] + v->y*m->m[1 + 4 * 1] + v->z*m->m[1 + 4 * 2];
	new_z = v->x*m->m[2 + 4 * 0] + v->y*m->m[2 + 4 * 1] + v->z*m->m[2 + 4 * 2];
	dst->x = new_x; dst->y = new_y; dst->z = new_z;
}

__global__ void MeltingSolidByHeat(float *pTemperature, float *pLHeat, char *pflag, int pnum, float LiquidHeatTh, float meltTemperature, int *numchange)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<pnum && pflag[idx] == TYPESOLID)
	{
		//if( pTemperature[idx]>meltTemperature )
		if (pLHeat[idx]>LiquidHeatTh)
		{
			pflag[idx] = TYPEFLUID;
			pLHeat[idx] = LiquidHeatTh;
			atomicAdd(&numchange[0], 1);
		}
	}
}

__global__ void FreezingSolidByHeat(float3* ppos, float *pLHeat, char *pflag, int pnum,
int *numchange, uint *gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPEFLUID) { //if( pTemperature[idx]>meltTemperature ) if (pLHeat[idx]<0) { //determine a new position which is appropriate for solid. // int i, j, k; float3 ipos = ppos[idx]; getijkfrompos(i, j, k, ipos); float mindis = 1000; int minidx = -1; int width = 1; int cntsolid = 0; float h = dparam.cellsize.x; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) { int gidx = getidx(i + di, j + dj, k + dk); uint start = gridstart[gidx]; if (start == CELL_UNDEF) continue; for (int p = start; p<gridend[gidx]; p++) { if (pflag[p] == TYPESOLID) { float dis = length(ppos[p] - ipos); if (dis< h) cntsolid++; if (length(ppos[p] - ipos)<mindis) mindis = length(ppos[p] - ipos), minidx = p; } } } if (minidx != -1 && mindis<dparam.cellsize.x && cntsolid>2)// { pflag[idx] = TYPESOLID; pLHeat[idx] = 0; atomicAdd(&numchange[0], 1); if (mindis > dparam.samplespace) { ipos = normalize(ipos - ppos[minidx])*dparam.samplespace + ppos[minidx]; ppos[idx] = ipos; } } } } } //air solo particledrag forcedragparam __global__ void calDragForce(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray ux, farray uy, farray uz, float dragparamsolo, float dragparamgrid, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIRSOLO) return; float3 ipos = ppos[idx], ivel = pvel[idx]; //compute the grid index int i, j, k; getijkfrompos(i, j, k, ipos); //compute drag "force" (actually not "force", is velocity change, tuning alpha is very important) float3 gridvel = getParticleVelFromGrid(ipos, ux, uy, uz); float3 gridpos = make_float3(i, j, k); float3 dragf_b = dragparamsolo * length(gridvel - ivel) * (gridvel - ivel); //grid's velocitybubble1 /* float alpha = 0.5f;*/ float3 velChange_g = -dragf_b*dragparamgrid*dparam.dt; // //update for grid float ux0, ux1, uy0, uy1, uz0, uz1; float3 weight = ipos / dparam.cellsize.x - gridpos; // in [0-1] ux0 = velChange_g.x*(1 - weight.x), ux1 = velChange_g.x*weight.x; uy0 = velChange_g.y*(1 - weight.y), uy1 = velChange_g.y*weight.y; uz0 = velChange_g.z*(1 - weight.z), uz1 = velChange_g.z*weight.z; atomicAdd(&(ux.data[getidx(i, j, k, NX + 1, NY, NZ)]), ux0); atomicAdd(&(ux.data[getidx(i + 1, j, k, NX + 1, NY, NZ)]), ux1); atomicAdd(&(uy.data[getidx(i, j, k, NX, NY + 1, NZ)]), uy0); atomicAdd(&(uy.data[getidx(i, j + 1, k, NX, NY + 1, NZ)]), uy1); atomicAdd(&(uz.data[getidx(i, j, k, NX, NY, NZ + 1)]), uz0); atomicAdd(&(uz.data[getidx(i, j, k + 1, NX, NY, NZ + 1)]), uz1); //update for particletodoInteraction if (scene == SCENE_INTERACTION || scene == SCENE_INTERACTION_HIGHRES) pvel[idx] += dragf_b*dparam.dt; } } __global__ void accumulate_GPU_k(int num, float3* out, float3* a)//dsum, a.data, flag, n { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? make_float3(0, 0, 0) : a[i]; //solidparticles __syncthreads(); for (int s = blockDim.x / 2; s>0; s >>= 1) { if (tid<s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k(int num, float3* out, float3* a, float* b)//dsum, a.data, flag, n { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? 
make_float3(0, 0, 0) : a[i]*b[i]; //solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k(int num, float3* out, float3* a, float3* b) { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? make_float3(0, 0, 0) : a[i]*b[i]; //solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k_float(int num, float* out, float* a)//dsum, a.data, flag, n { extern __shared__ float fddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; fddata[tid] = (i >= num) ? 0 : a[i]; //solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) fddata[tid] += fddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = fddata[0]; } __global__ void compute_cI_k(int pnum, char* parflag, float3 *parPos, float3 *parVel, float3* c, float3* weight, float3 rg) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPESOLID) { float dis = length(parPos[idx] - rg); if (dis>1e-6) { c[idx] = cross(parPos[idx] - rg, parVel[idx]); weight[idx] = make_float3(dis, 0, 0); } else c[idx] = weight[idx] = make_float3(0); } else { c[idx] = weight[idx] = make_float3(0); //c[idx] = make_float3(0,0,0); } } } __global__ void setVelZeroSolid_k(float3 *parvel, char *parflag, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) parvel[idx] = make_float3(0); } __global__ void computeVelSolid_k(float3* parPos, char* parflag, float3* parVel, int pnum, float3 rg, float3 R, float3 T) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) { float3 v_half = cross(R, parPos[idx] - rg); //` v_half += T; // v_half = 0.5*(parVel[idx] + v_half); parVel[idx] = v_half; // parVel[idx] = make_float3(0); } } __device__ inline float3 transposeParticle(float3 p, matrix3x3 rm) { float3 res; res.x = p.x*rm.x00 + p.y*rm.x10 + p.z*rm.x20; res.y = p.x*rm.x01 + p.y*rm.x11 + p.z*rm.x21; res.z = p.x*rm.x02 + p.y*rm.x12 + p.z*rm.x22; return res; } //rotation matrix "rm" __global__ void computePosSolid_k(float3* parvel, float3* parPos, char* parflag, int pnum, float3 rg, float3 rg0, matrix3x3 rm) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) { float3 transp = parPos[idx] - rg0; transp = transposeParticle(transp, rm); parPos[idx] = transp + rg; //if (length(parPos[idx])<10.5) //parPos[idx] -= parvel[idx] * 0.00001; } } __global__ void computeSolidVertex_k(float3* vertexpos, int vnum, float3 rg, float3 rg0, matrix3x3 rm) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<vnum) { float3 transp = vertexpos[idx] - rg0; transp = transposeParticle(transp, rm); vertexpos[idx] = transp + rg; } } __global__ void set_nonsolid_2_zero(char* pflag, int pnum, float3* Pos, float3* Vel) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] != TYPESOLID) { Pos[idx] = make_float3(0, 0, 0); Vel[idx] = make_float3(0, 0, 0); //Mass[idx] = 0.; } } //fluid, air, airsolosolidsolid __global__ void CollisionWithSolid_k(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray 
phisolid, farray sux, farray suy, farray suz, SCENE scene, float bounceVelParam, float bouncePosParam) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float iphi = getScaleFromFrid(ipos, phisolid); if (iphi <= 0.5f) // { float3 svel = getParticleVelFromGrid(ipos, sux, suy, suz); float3 rvel = ivel - svel; float d = dparam.cellsize.x * 0.5f; float3 phigrad; phigrad.x = getScaleFromFrid(ipos + make_float3(d, 0, 0), phisolid) - getScaleFromFrid(ipos - make_float3(d, 0, 0), phisolid); phigrad.y = getScaleFromFrid(ipos + make_float3(0, d, 0), phisolid) - getScaleFromFrid(ipos - make_float3(0, d, 0), phisolid); phigrad.z = getScaleFromFrid(ipos + make_float3(0, 0, d), phisolid) - getScaleFromFrid(ipos - make_float3(0, 0, d), phisolid); if (length(phigrad) > 0) { phigrad = normalize(phigrad); // if (dot(rvel, phigrad)<0 || scene == SCENE_FREEZING) // { ivel -= bounceVelParam * dot(rvel, phigrad)*phigrad; // if (scene == SCENE_FREEZING) ivel -= 0.1f* (rvel - dot(rvel, phigrad)*phigrad); // } ipos += bouncePosParam * phigrad * (0.5f - iphi) * dparam.cellsize.x; } } // ipos += ivel*dparam.dt; // float rate = 0.5f, ratevel = -0.5f; if (pflag[idx] == TYPEAIRSOLO) rate = 0.8f, ratevel = -0.5f; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(rate*dparam.cellsize.x)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(rate*dparam.cellsize.x)); // if( ipos.x>tmax.x ) // ivel.x *=ratevel, ipos.x=tmax.x; // if( ipos.x<tmin.x ) // ivel.x *= ratevel, ipos.x=tmin.x; // if( ipos.y>tmax.y ) // ivel.y *=ratevel, ipos.y=tmax.y; // if( ipos.y<tmin.y ) // ivel.y *= ratevel, ipos.y=tmin.y; // if( ipos.z>tmax.z ) // ivel.z *=ratevel, ipos.z=tmax.z; // if( ipos.z<tmin.z ) // ivel.z *= ratevel, ipos.z=tmin.z; if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; // pvel[idx] = ivel; ppos[idx] = ipos; } } //melting and freezingfluid, air, airsolosolidsolid __global__ void CollisionWithSolid_Freezing(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray phisolid, uint* gridstart, uint* gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; int i, j, k; getijkfrompos(i, j, k, ipos); float iphi = getScaleFromFrid(ipos, phisolid); if (iphi <= 1.0f) // { float r = 0.25f*dparam.cellsize.x; float3 collisionpos = make_float3(0), dir; float depth = 0, dis, adhesionDis = 0; int cntcollide = 0, cntadhesion = 0; float h = 4 * r; for (int di = -1; di <= 1; di++)for (int dj = -1; dj <= 1; dj++)for (int dk = -1; dk <= 1; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { int grididx = getidx(i + di, j + dj, k + dk); int start = gridstart[grididx]; if (start == CELL_UNDEF) continue; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<2 * r) // { collisionpos += ppos[p]; depth = max(depth, 2 * r - dis); cntcollide++; } else if (dis< h) { adhesionDis += dis; cntadhesion++; } } } } float3 n; float d = dparam.cellsize.x * 0.5f; n.x = getScaleFromFrid(ipos + make_float3(d, 0, 0), phisolid) - getScaleFromFrid(ipos - make_float3(d, 0, 0), phisolid); n.y = 
getScaleFromFrid(ipos + make_float3(0, d, 0), phisolid) - getScaleFromFrid(ipos - make_float3(0, d, 0), phisolid); n.z = getScaleFromFrid(ipos + make_float3(0, 0, d), phisolid) - getScaleFromFrid(ipos - make_float3(0, 0, d), phisolid); float3 originalvel = ivel; if (length(n) > 0) { n = normalize(n); // if (cntcollide>0) // { collisionpos /= cntcollide; if (length(n) > 0) { //correct vel and pos; ivel -= dot(originalvel, n)*n; // //ivel *= 1.1f; ipos += depth * n; } } else if (cntadhesion>0) // { float alpha = 0.1f; ivel -= n * alpha * length(ivel); } } } // // float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); if (ipos.x>tmax.x) ivel.x *= -0.5f, ipos.x = tmax.x; if (ipos.x<tmin.x) ivel.x *= -0.5f, ipos.x = tmin.x; if (ipos.y>tmax.y) ivel.y *= -0.5f, ipos.y = tmax.y; if (ipos.y<tmin.y) ivel.y *= -0.5f, ipos.y = tmin.y; if (ipos.z>tmax.z) ivel.z *= -0.5f, ipos.z = tmax.z; if (ipos.z<tmin.z) ivel.z *= -0.5f, ipos.z = tmin.z; ipos += ivel*dparam.dt; // pvel[idx] = ivel; ppos[idx] = ipos; } } __global__ void buoyancyForSolid(float3 *ppos, float3 *pvel, char *pflag, int pnum, uint *gridstart, uint *gridend, float SolidBuoyanceParam) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { int cnt = 0; int i, j, k; float3 ipos = ppos[idx]; getijkfrompos(i, j, k, ipos); float r = dparam.cellsize.x; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { int gidx = getidx(i + di, j + dj, k + dk); uint start = gridstart[gidx]; if (start != CELL_UNDEF) { for (uint p = start; p<gridend[gidx]; p++) if (pflag[p] == TYPEFLUID && length(ppos[p] - ipos)<r) cnt++; } } } if (cnt>2) pvel[idx].z += (dparam.waterrho - dparam.solidrho) * SolidBuoyanceParam * dparam.dt; } } __global__ void solidCollisionWithBound(float3 *ppos, float3 *pvel, char *pflag, int pnum, float SolidbounceParam, int nSolPoint) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { //check position float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; //float eps=1e-6; // //() if (ipos.x<tmin.x) ivel.x += (tmin.x - ipos.x) * SolidbounceParam * nSolPoint; if (ipos.x>tmax.x) ivel.x -= (ipos.x - tmax.x) * SolidbounceParam * nSolPoint; if (ipos.y<tmin.y) ivel.y += (tmin.y - ipos.y) * SolidbounceParam * nSolPoint; if (ipos.y>tmax.y) ivel.y -= (ipos.y - tmax.y) * SolidbounceParam * nSolPoint; if (ipos.z<tmin.z) ivel.z += (tmin.z - ipos.z) * SolidbounceParam * nSolPoint; if (ipos.z>tmax.z) ivel.z -= (ipos.z - tmax.z) * SolidbounceParam * nSolPoint; pvel[idx] = ivel; //ppos[idx]=ipos; // } } //there is a problem here, remember to solve it. 
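// The accumulate_GPU_k kernels above perform a shared-memory tree reduction and emit one partial sum
// per block (the loop assumes blockDim.x is a power of two). A minimal host-side sketch of how the
// partial sums could be combined -- illustration only, dPartial/dParticleVel are placeholder names:
//   int threads = 256, blocks = (pnum + threads - 1) / threads;
//   accumulate_GPU_k<<<blocks, threads, threads * sizeof(float3)>>>(pnum, dPartial, dParticleVel);
//   // ...then copy the 'blocks' partial sums back and add them on the CPU, or run a second pass.
// (The commented-out kernel below was an experimental pass that generated air bubbles next to solids.)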
// __global__ void genAirFromSolid_k( float3 *ppos, float3 *pvel, char *pflag, float *psolubility, float *paircontain, float *pmass, float *pTemperature,int pnum, // charray lsmark, farray phisolid, farray Tgrid, int *addnum, float *randfloat, int nrandnum, int frame ) // { // int idx=__mul24( blockIdx.x, blockDim.x )+threadIdx.x; // if( idx<dparam.gnum &&lsmark[idx]==TYPEFLUID && phisolid[idx]>0 ) // // { // int i,j,k; // getijk( i,j,k,idx); // bool flag=false; // for( int di=-1; di<=1; di++ ) for( int dj=-1; dj<=1; dj++ ) for( int dk=-1; dk<=1; dk++ ) // { // if(verifycellidx(i+di,j+dj,k+dk) && phisolid( i+di,j+dj,k+dk)<0 ) // flag=true; // } // if( !flag ) // return; // // int cnt= (idx*frame) % ( nrandnum-100 ); // if( randfloat[cnt++]>0.95 ) //if randnum>thresold, generate a airsolo bubble // { // int addidx=atomicAdd( addnum, 1 ); // float3 addpos= (make_float3(randfloat[cnt], randfloat[cnt], randfloat[cnt]) + make_float3(i,j,k) ) * dparam.cellsize.x; // ppos[pnum+addidx] = addpos; // pvel[pnum+addidx]=make_float3(0); // pflag[pnum+addidx]=TYPEAIRSOLO; // psolubility[pnum+addidx]=0; // paircontain[pnum+addidx]=0; // pmass[pnum+addidx]=dparam.airm0; // pTemperature[pnum+addidx]=getScaleFromFrid( addpos, Tgrid ); // } // } // } //latent heat()latent heatlatent heatphase change. __global__ void updateLatentHeat_k(float *parTemperature, float *parLHeat, char *partype, int pnum, float meltingpoint, float boilingpoint, float LiquidHeatTh) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (partype[idx] == TYPESOLID && parTemperature[idx]>meltingpoint) { parLHeat[idx] += parTemperature[idx] - meltingpoint; parTemperature[idx] = meltingpoint; } if (partype[idx] == TYPEFLUID) { if (parTemperature[idx]<meltingpoint) { parLHeat[idx] -= meltingpoint - parTemperature[idx]; parTemperature[idx] = meltingpoint; } else if (parTemperature[idx]>boilingpoint) { parLHeat[idx] += parTemperature[idx] - boilingpoint; // parLHeat[idx] = min( parLHeat[idx], LiquidHeatTh+5 ); parTemperature[idx] = boilingpoint; } else parLHeat[idx] = LiquidHeatTh; } } } __global__ void pouringwater(float3* pos, float3* vel, float* parmass, char* parflag, float *ptemperature, float *pLHeat, float *pGasContain, int parnum, float3 *ppourpos, float3 *ppourvel, char pourflag, int pournum, float *randfloat, int randnum, int frame, float posrandparam, float velrandparam, float defaultLiquidT, float LiquidHeatTh) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pournum) { // int randbase = (frame + idx) % (randnum - 6); float3 randvel = make_float3(randfloat[randbase], randfloat[randbase + 1], randfloat[randbase + 2]) *2.0f - 1.0f; randbase += 3; float3 randpos = make_float3(randfloat[randbase], randfloat[randbase + 1], randfloat[randbase + 2]) *2.0f - 1.0f; pos[parnum + idx] = ppourpos[idx] + randpos * posrandparam*dparam.samplespace; vel[parnum + idx] = ppourvel[idx] + randvel * velrandparam; parmass[parnum + idx] = dparam.m0; parflag[parnum + idx] = pourflag; ptemperature[parnum + idx] = defaultLiquidT; pLHeat[parnum + idx] = LiquidHeatTh; pGasContain[parnum + idx] = 0; } } inline __device__ float getlen(float x, float y) { return sqrt(x*x + y*y); } __global__ void initheat_grid_k(farray tp, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float x = i, z = k; float r = NX*0.15; if (getlen(x - NX / 4, z - NZ / 4) <= r) tp[idx] = 100, mark[idx] = TYPESOLID; else if (getlen(x - NX / 4 * 3, z - NZ / 4 * 3) <= 
r) tp[idx] = 0, mark[idx] = TYPEFLUID; else if (z<NZ / 2) tp[idx] = 20, mark[idx] = TYPEVACUUM; else tp[idx] = 80, mark[idx] = TYPEAIR; } } __global__ void set_softparticle_position(float3* solidParPos, float3* mParPos, float3* solidParVelFLIP,float3* mParVel, char* partype) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) if (partype[idx]==TYPESOLID) { mParPos[idx] = solidParPos[idx]; mParVel[idx] = (solidParVelFLIP[idx]+mParVel[idx])/2.0; // mParVel[idx] = solidParVelFLIP[idx]; } }; //****************************************LBM algorithm***************************** __global__ void initLBMfield_k(farray ux, farray uy,farray uz, charray mark, farray f0, farray rho0) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) //for(int idx=0;idx<dparam.gnum;idx++) { int i, j, k; getijk(i, j, k, idx); if (mark[idx] ==TYPEBOUNDARY || mark[idx]==TYPEVACUUM ) { rho0.data[idx] = 0.; } else rho0.data[idx] = 1; for(int Qm = 0; Qm < 19; Qm++) { f0(i,j,k,Qm) = LBMfeq(make_float3(ux(i, j, k), uy(i, j, k), uz(i, j, k)), LBM_dparam.omega, rho0(idx), make_int3(LBM_dparam.vel_i[Qm].x, LBM_dparam.vel_i[Qm].y, LBM_dparam.vel_i[Qm].z), LBM_dparam.R*LBM_dparam.LBM_T0); //if(f0(i,j,k,Qm))printf("(%d,%d,%d)-%f\n", i, j, k, f0(i, j, k,Qm)); } } } __global__ void deriveLBMquantities_k(farray ux, farray uy, farray uz, charray mark, farray f, farray rho) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) // for(idx=0;idx<dparam.gnum;idx++) {// calculate average density //if (mark[idx] == TYPEVACUUM || mark[idx] == TYPEBOUNDARY) // return; rho.data[idx] = 0; int i,j,k; getijk(i,j,k,idx); for ( int Qm= 0; Qm < 19; Qm++) { rho(i,j,k) += f(i,j,k,Qm); } // calculate average velocity u ux.data[idx] = 0; uy.data[idx] = 0; uz.data[idx] = 0; if (rho.data[idx] > 0) { for (int Qm = 0; Qm < 19; Qm++) { ux.data[idx] += f(i,j,k,Qm) * LBM_dparam.vel_i[Qm].x; uy.data[idx] += f(i,j,k,Qm) * LBM_dparam.vel_i[Qm].y; uz.data[idx] += f(i,j,k,Qm) * LBM_dparam.vel_i[Qm].z; } float s = 1 / rho.data[idx]; ux.data[idx] *= s; uy.data[idx] *= s; uz.data[idx] *= s; } // rescale in case maximum velocity is exceeded float n = Vec3_Norm(make_float3(ux(idx), uy(idx), uz(idx))); if (n > v_max) { ux.data[idx] *= v_max / n; uy.data[idx] *= v_max / n; uz.data[idx] *= v_max / n; } } } __global__ void CalcLBMcollision_k(farray ux, farray uy, farray uz, charray mark, farray df, farray dF, farray rho) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; //for(int idx=0;idx<dparam.gnum;idx++) if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (mark[idx] == TYPEBOUNDARY || mark[idx] == TYPEVACUUM) return; for (int Qm = 0; Qm < 19; Qm++) { // calculate equilibrium distribution function float f_eq; f_eq = LBMfeq(make_float3(ux(i, j, k), uy(i, j, k), uz(i, j, k)), LBM_dparam.omega, rho(idx), make_int3(LBM_dparam.vel_i[Qm].x, LBM_dparam.vel_i[Qm].y, LBM_dparam.vel_i[Qm].z), LBM_dparam.R*LBM_dparam.LBM_T0); // perform collision dF(i, j, k, Qm) = (1 - LBM_dparam.omega)*df(i, j, k, Qm) + LBM_dparam.omega * f_eq; //gravity dF(i, j, k, Qm) += rho(idx) * LBM_dparam.weight[Qm] * dot(LBM_dparam.vel_i[Qm], make_float3(0,0,-0.1));//gravity==-0.1 df(i, j, k, Qm) = dF(i,j,k,Qm); } } } __global__ void LBMStream_k(farray ux, farray uy, farray uz, charray mark, farray df_stream, farray dF, farray rho,float *mass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; int neighidx; int ii, jj, kk;//after stream getijk(i, j, k, idx); 
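	// Free-surface LBM streaming (D3Q19): populations are pulled from the 19 neighbours; mass is
	// exchanged with fluid/interface neighbours (Eq. 4.2 / 4.3), populations are reflected at obstacles
	// (Eq. 3.5), and interface cells reconstruct an "atmospheric" equilibrium distribution (Eq. 4.5)
	// toward empty cells and along the surface normal (Eq. 4.6). The equation numbers match the
	// free-surface LBM formulation this code appears to follow (e.g. Thuerey's single-phase free-surface model).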
	if (mark[idx] == TYPEBOUNDARY || mark[idx] == TYPEVACUUM)
		return;
	// copy distribution function corresponding to velocity zero
	df_stream(i, j, k, 0) = dF(i, j, k, 0);
	float df_neigh[19] = { 0 };
	if (mark[idx] & (TYPEFLUID | TYPESOLID))
		for (int Qm = 1; Qm < 19; Qm++)	// omit zero vector
		{
			ii = i - LBM_dparam.vel_i[Qm].x;
			jj = j - LBM_dparam.vel_i[Qm].y;
			kk = k - LBM_dparam.vel_i[Qm].z;
			neighidx = getidx(ii, jj, kk);
			df_neigh[Qm] = dF(ii, jj, kk, Qm);
			// fluid cell must not be adjacent to an empty cell
			assert((mark[neighidx] & TYPEVACUUM) == 0);
			if (mark[neighidx] & (TYPEFLUID | TYPESURFACE | TYPESOLID))
			{
				// mass exchange with fluid or interface cell, Eq. (4.2)
				mass[idx] += (df_neigh[Qm] - dF(i, j, k, LBM_dparam.invVel_i[Qm]));
				// standard streaming, Eq. (3.1)
				df_stream(i, j, k, Qm) = df_neigh[Qm];
			}
			else // type & CT_OBSTACLE
				df_stream(i, j, k, Qm) = dF(i, j, k, LBM_dparam.invVel_i[Qm]);	// reflect density functions, Eq. (3.5)
		}
	else if (mark[idx] & TYPESURFACE)
	{
		const float epsilon = CalcEpsilon(mark[idx], rho[idx], mass[idx]);
		// calculate atmospheric equilibrium distribution function
		float f_atm_eq[19];
		for (int Qm = 0; Qm < 19; Qm++)
			f_atm_eq[Qm] = LBMfeq(make_float3(ux(i, j, k), uy(i, j, k), uz(i, j, k)), LBM_dparam.omega, 1.0,	//rhoA
				make_int3(LBM_dparam.vel_i[Qm].x, LBM_dparam.vel_i[Qm].y, LBM_dparam.vel_i[Qm].z), LBM_dparam.R*LBM_dparam.LBM_T0);
		for (int Qm = 1; Qm < 19; Qm++)	// omit zero vector
		{
			// neighbour cell the populations stream from
			ii = i - LBM_dparam.vel_i[Qm].x;
			jj = j - LBM_dparam.vel_i[Qm].y;
			kk = k - LBM_dparam.vel_i[Qm].z;
			neighidx = getidx(ii, jj, kk);
			df_neigh[Qm] = dF(ii, jj, kk, Qm);
			if (mark[neighidx] & TYPEFLUID)
			{
				// mass exchange between fluid and interface cell, Eq. (4.2)
				mass[idx] += (df_neigh[Qm] - dF(i, j, k, LBM_dparam.invVel_i[Qm]));
				// standard streaming, Eq. (3.1)
				df_stream(i, j, k, Qm) = df_neigh[Qm];
			}
			else if (mark[neighidx] & TYPESURFACE)
			{
				const float eps_neigh = CalcEpsilon(mark[neighidx], rho[neighidx], mass[neighidx]);
				// mass exchange between two interface cells, Eq. (4.3)
				mass[idx] += CalcMassExchange(mark[idx], mark[neighidx], df_neigh[Qm], dF(i, j, k, LBM_dparam.invVel_i[Qm]))*0.5*(eps_neigh + epsilon);
				// standard streaming, Eq. (3.1)
				df_stream(i, j, k, Qm) = df_neigh[Qm];
			}
			else if (mark[neighidx] & TYPEVACUUM)
				// no mass exchange from or to empty cell
				// reconstructed atmospheric distribution function, Eq. (4.5)
				df_stream(i, j, k, Qm) = f_atm_eq[Qm] + f_atm_eq[LBM_dparam.invVel_i[Qm]] - dF(i, j, k, LBM_dparam.invVel_i[Qm]);
			else // df_neigh->type & CT_OBSTACLE
			{
				// reflect density functions, Eq. (3.5)
				df_stream(i, j, k, Qm) = dF(i, j, k, LBM_dparam.invVel_i[Qm]);
			}
		}
		// calculate surface normal
		const float3 norm = CalcLBMNormal(mark, rho, mass, i, j, k);
		// always use reconstructed atmospheric distribution function for directions along surface normal;
		// separate loop to handle mass exchange correctly
		for (int Qm = 1; Qm < 19; Qm++)	// omit zero vector
		{
			if (dot(norm, LBM_dparam.vel_i[LBM_dparam.invVel_i[Qm]]) > 0) // Eq. 4.6
				// reconstructed atmospheric distribution function, Eq.
(4.5) df_stream(i, j, k, Qm) = f_atm_eq[Qm] + f_atm_eq[LBM_dparam.invVel_i[Qm]] - dF(i, j, k, LBM_dparam.invVel_i[Qm]); } }// df->type & TYPESURFACE for (int Qm = 0; Qm < 19; Qm++) dF(i, j, k, Qm) = df_stream(i, j, k, Qm); } } __global__ void initLBMmass_k(charray mark, float* dgmass, farray rho) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { if (mark[idx] == TYPEFLUID || mark[idx]== TYPESOLID) dgmass[idx] = rho[idx]; else if (mark[idx] == TYPESURFACE) dgmass[idx] = rho[idx] * 0.5; else dgmass[idx] = 0; } } __global__ void LBMFluidmass2rho_k(charray mark, farray rho, float* mass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { if (mark[idx] & (TYPEFLUID | TYPESOLID)) { assert(fabs(rho[idx] / mass[idx] - 1) < 5e-6); rho[idx] = mass[idx]; } } } __global__ void LBMUpdateType1_k(charray mark,charray oldmark, float3 * oldnorm, farray df, farray df_next, farray rho, float* mass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); //copy typies oldmark[idx] = mark[idx]; oldnorm[idx] = CalcLBMNormal(mark, rho, mass, i, j, k);//4 // current cell //for (int Qm = 0; Qm < 19; Qm++) // if(df_next(i, j, k, Qm) != df(i, j, k, Qm))printf("!!!!!!!!!!!!!!!!!!!!"); // check whether interface cells emptied or filled if (mark[idx] & TYPESURFACE) { // Eq. (4.7), and remove interface cell artifacts if ( (mass[idx] > (1 + FILL_OFFSET)*rho[idx]) || (mass[idx] >= (1 - LONELY_THRESH)*rho[idx] && (mark[idx] & TYPENOEMPTYMEIGH))) //surface to fluid cell mark[idx] = TYPE_IF_TO_FLUID; else if ( (mass[idx] < -FILL_OFFSET*rho[idx]) || ((mass[idx] <= LONELY_THRESH*rho[idx]) && (mark[idx] & TYPENOFLUIDNEIGH)) || ((mark[idx] & TYPENOIFACENEIGH) && (mark[idx] & TYPENOFLUIDNEIGH))) // interface to empty cell mark[idx] = TYPE_IF_TO_EMPTY; } // clear neighborhood flags (will be determined later) mark[idx] &= ~(TYPENOFLUIDNEIGH | TYPENOIFACENEIGH | TYPENOIFACENEIGH); } } __global__ void LBMUpdateType2_k(charray mark, charray oldmark, farray df, farray df_next, farray rho,farray tmprho, float* mass, farray ux,farray uy,farray uz, farray tmpux, farray tmpuy, farray tmpuz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); float df_neigh[19] = { 0 }; tmprho[idx] = rho[idx]; tmpux[idx] = ux[idx]; tmpuy[idx] = uy[idx]; tmpuz[idx] = uz[idx]; // set flags for filled interface cells (interface to fluid) if (mark[idx] & TYPE_IF_TO_FLUID) { // keep flag 'CT_IF_TO_FLUID' for later excess mass distribution // convert neighboring empty cells to interface cells for (int Qm = 1; Qm < 19; Qm++)// omit zero vector { df(i, j, k, Qm) = df_next(i, j, k, Qm);//useless? int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; df_neigh[Qm] = df_next(i_neigh, j_neigh, k_neigh, Qm); int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPEVACUUM) { mark[idxneig] = TYPESURFACE; // initialize cell with average density and velocity of surrounding cells, using f0//!!! 
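					// a freshly converted empty->interface cell has no valid populations of its own, so its
					// density and velocity are taken from the pre-update copies (oldmark / tmprho / tmpux...)
					// of the surrounding cells.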
LBMAverageSurrounding(oldmark, mass, rho[idxneig],tmprho, df_next, ux[idxneig], uy[idxneig],uz[idxneig], tmpux, tmpuy, tmpuz, i_neigh,j_neigh,k_neigh,Qm); //charray mark, float* mass, float &rho, farray tmprho, farray df, float &ux, float &uy, float &uz, farray tmpux, farray tmpuy, farray tmpuz, int i, int j, int k } } // prevent neighboring cells from becoming empty for (int Qm = 1; Qm < 19; Qm++)// omit zero vector { //neighbor cell int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; //df_neigh[Qm] = df_next(i_neigh, j_neigh, k_neigh, Qm); int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPE_IF_TO_EMPTY) mark[idxneig] = TYPESURFACE; //df(i, j, k, Qm) = df_next(i, j, k, Qm) } } } } __global__ void LBMUpdateType3_k(charray mark, farray df, farray df_next, farray rho, float* mass) { // set flags for emptied interface cells (interface to empty) int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); if (mark[idx] & TYPE_IF_TO_EMPTY) // keep flag 'CT_IF_TO_EMPTY' for later excess mass distribution // convert neighboring fluid cells to interface cells for (int Qm = 1; Qm < 19; Qm++)//omit zero vector { //neighbor cell int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPEFLUID) mark[idxneig] = TYPESURFACE; } } } __global__ void LBMUpdateType4_k(charray mark, charray markold, float3* oldnorm, farray df_distr, farray dF, farray rho, float* mass) { // distribute excess mass int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); // calculate surface normal using 'f0', such that excess mass distribution is independent of the filled cell ordering float3 norm = oldnorm[idx]; //excess mass float mex; if (mark[idx] & TYPE_IF_TO_FLUID) { mex = mass[idx] - rho[idx]; // after excess mass has been distributed, remaining mass equals density mass[idx] = rho[idx]; } else if (mark[idx] & TYPE_IF_TO_EMPTY) { mex = mass[idx]; //flip sign of noraml; norm.x = -norm.x; norm.y = -norm.y; norm.z = -norm.z; // after negative excess mass has been distributed, remaining mass is zero mass[idx] = 0; } else return; // Eq. 
(4.9) float eta[19] = { 0 }; float eta_total = 0; unsigned int isIF[19] = { 0 }; unsigned int numIF = 0;// number of interface cell neighbors for (int Qm = 0; Qm < 19; Qm++) df_distr(i, j, k, Qm) = 0; for (int Qm = 1; Qm < 19; Qm++)//omit zero vector { // neighbor cell in the direction of velocity vector int i_neigh = i + LBM_dparam.vel_i[Qm].x; int j_neigh = j + LBM_dparam.vel_i[Qm].y; int k_neigh = k + LBM_dparam.vel_i[Qm].z; int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPESURFACE) { eta[Qm] = dot(LBM_dparam.vel_i[Qm], norm); if (eta[Qm] < 0) eta[Qm] = 0; eta_total += eta[Qm]; isIF[Qm] = 1; numIF++; } // store excess mass to be distributed in 'f_distr'; // don't actually distribute yet to ensure independence of cell traversal order // cell for excess mass distribution, store in distribution functions if (eta_total > 0) { float eta_fac = 1 / eta_total; for (int Qm = 1; Qm < 19; Qm++)//omit zere vector { // eta[i] is zero for non-interface cells df_distr(i, j, k, Qm) = mex*eta[Qm] * eta_fac; } } else if (numIF > 0) { // distribute uniformly float mex_rel = mex / numIF; for (int Qm = 1; Qm < 19; Qm++)//omit zere vector { df_distr(i, j, k, Qm) = (isIF[Qm] ? mex_rel : 0); //df } } // else, excess mass cannot be distributed, i.e., has leaked // dF(i, j, k, Qm) = df_distr(i, j, k, Qm); } } } __global__ void LBMUpdateType5_k(charray mark, farray df, farray dF, farray rho, float* mass) { //// collect distributed mass and finalize cell flags int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); if (mark[idx] & TYPESURFACE) for (int Qm = 1; Qm < 19; Qm++) { // neighbor cell in the direction of velocity vector int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; int idxneig = getidx(i_neigh, j_neigh, k_neigh); mass[idx] += df(i_neigh,j_neigh,k_neigh,Qm); } else if (mark[idx] & TYPE_IF_TO_FLUID) mark[idx] = TYPEFLUID; else if (mark[idx] & TYPE_IF_TO_EMPTY) mark[idx] = TYPEVACUUM; // assert((mark[idx] & (CT_OBSTACLE | CT_FLUID | CT_INTERFACE | CT_EMPTY)) != 0); // assert((mark[idx] & ~(CT_OBSTACLE | CT_FLUID | CT_INTERFACE | CT_EMPTY)) == 0); } } __global__ void LBMUpdateType6_k(charray mark, farray df, farray df_next, farray rho, float* mass) { // set cell neighborhood flags int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { // ignore obstacle cells if (mark[idx] & TYPEBOUNDARY) return; // set "no ... neighbor" flags mark[idx] |= (TYPENOEMPTYMEIGH | TYPENOFLUIDNEIGH | TYPENOIFACENEIGH); for(int Qm = 1; Qm < 19; Qm++)//omit zere vector { // neighbor cell in the direction of velocity vector int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPEFLUID) //remove "no fluid neighbor" flag mark[idx] &= ~TYPENOFLUIDNEIGH; else if (mark[idxneig] & TYPEVACUUM) // remove "no empty neighbor" flag mark[idx] &= ~TYPENOEMPTYMEIGH; else if (mark[idxneig] & TYPESURFACE) // remove "no interface neighbor" flag mark[idx] &= ~TYPENOIFACENEIGH; } // both flags should not be set simultaneously if (mark[idx] & TYPENOEMPTYMEIGH) mark[idx] &= ~TYPENOFLUIDNEIGH; } } //***********************************************************************************/
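// ---------------------------------------------------------------------------
// For reference only: a minimal sketch of the textbook second-order D3Q19 BGK
// equilibrium that the free-surface kernels above rely on through LBMfeq
// (whose `omega` argument is the per-direction lattice weight), i.e.
//   feq_q = w_q * rho * ( 1 + (e_q.u)/RT0 + (e_q.u)^2/(2*RT0^2) - (u.u)/(2*RT0) ),
// with RT0 the squared lattice speed of sound. The function name
// LBMfeq_reference is illustrative and unused; the #if 0 guard keeps this
// sketch out of the build.
#if 0
__host__ __device__ inline float LBMfeq_reference(float3 u, float w_q, float rho, int3 e_q, float RT0)
{
	float ex = (float)e_q.x, ey = (float)e_q.y, ez = (float)e_q.z;
	float eu = ex*u.x + ey*u.y + ez*u.z;          // e_q . u
	float uu = u.x*u.x + u.y*u.y + u.z*u.z;       // u . u
	return w_q * rho * (1.0f + eu / RT0 + 0.5f*eu*eu / (RT0*RT0) - 0.5f*uu / RT0);
}
#endif
// ---------------------------------------------------------------------------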
b0cb8eb0185425da5ae47abae68094795a5afe6f.cu
#include <cuda_runtime.h> // includes cuda.h and cuda_runtime_api.h #include "spray_k.cuh" #include<helper_cuda.h> #include<helper_math.h> #include "utility.h" #include "tables.h" __constant__ FlipConstant dparam; __constant__ LBMConstant LBM_dparam; __constant__ int NX; __constant__ int NY; __constant__ int NZ; __constant__ int NXMC; __constant__ int NYMC; __constant__ int NZMC; texture<uint, 1, cudaReadModeElementType> edgeTex; texture<uint, 1, cudaReadModeElementType> triTex; texture<uint, 1, cudaReadModeElementType> numVertsTex; __device__ float racc = 0.; __device__ float wacc = 0.; __device__ float3 pacc; __device__ float sradiusInv; __constant__ float v_max = (float)0.816496580927726; //!< set maximum velocity to sqrt(2/3), such that f_eq[0] >= 0 __host__ __device__ inline int getidx(int i, int j, int k) { return (i*NZ*NY + j*NZ + k); } inline __host__ __device__ int dot(int3 a,float3 b) { return a.x * b.x + a.y * b.y + a.z*b.z; } inline __host__ __device__ int dot(float3 a, int3 b) { return a.x * b.x + a.y * b.y + a.z*b.z; } //**********************************LBM***************************************** __host__ __device__ float LBMfeq(float3 u, float omega, float rho, int3 vel_i ,float RT0)//vel_i==e[qm][3] { float feq; float3 vel; vel.x = (float)vel_i.x, vel.y = (float)vel_i.y; vel.z = (float)vel_i.z; feq = omega * rho * (1.0 + dot(u, vel) / RT0 + 0.5*dot(u, vel*dot(u, vel) / RT0 / RT0 - dot(u,u) / (2 * RT0))); return feq; } __device__ inline float Vec3_Norm(const float3 v) { // have to change to 'fabs' for 'typedef double real' float a = fabsf(v.x); float b = fabsf(v.y); float c = fabsf(v.z); if (a < b) { if (b < c) { return c*sqrtf(1 +pow(a/c,2) + pow(b/c,2)); } else // a < b, c <= b { return b*sqrtf(1 + pow(a/b,2) + pow(c/b,2)); } } else // b <= a { if (a < c) { return c*sqrtf(1 + pow(a / c,2) + pow(b / c,2)); } else // b <= a, c <= a { if (a != 0) { return a*sqrtf(1 + pow(b / a,2) + pow(c / a,2)); } else { return 0; } } } } __host__ __device__ float LBMheq(float3 u, float omega, float rho, int3 vel_i, float T,float RT0)//vel_i==e[qm][3] { float feq,heq,E; float3 vel; float cv, p0; vel.x = (float)vel_i.x, vel.y = (float)vel_i.y; vel.z = (float)vel_i.z; E = cv*T + dot(u, u) / 2.0; feq = omega * rho * (1.0 + dot(u, vel) / RT0 + 0.5*dot(u, vel*dot(u, vel) / RT0 / RT0 - dot(u, u) / (2 * RT0))); heq = omega * p0 * (dot(vel, u) / RT0 + dot(vel, u)*dot(vel, u) / RT0 / RT0 - dot(u, u) / RT0 + 0.5*(dot(vel, vel) / RT0 - 3.0)) + E*feq; return feq; } __device__ inline float CalcEpsilon(char mark, float rho, float mass) { if (mark & (TYPEFLUID | TYPEBOUNDARY | TYPESOLID)) { return 1; } else if (mark & TYPESURFACE) { assert( rho >= 0); if (rho > 0) { real epsilon = mass / rho; // df->mass can even be < 0 or > df->rho for interface cells to be converted to fluid or empty cells in the next step; // clamp to [0,1] range for numerical stability if (epsilon > 1) { epsilon = 1; } else if (epsilon < 0) { epsilon = 0; } return epsilon; } else { // return (somewhat arbitrarily) a ratio of 1/2 return (real)0.5; } } else // df->type & CT_EMPTY { assert(mark & TYPEVACUUM); return 0; } } __device__ inline float3 CalcLBMNormal(charray mark,farray rho, float* mass, int i,int j, int k) { float3 norm; norm.x = 0.5*(CalcEpsilon(mark[getidx(i - 1, j, k)], rho[getidx(i - 1, j, k)], mass[getidx(i - 1, j, k)]) - CalcEpsilon(mark[getidx(i + 1, j, k)], rho[getidx(i + 1, j, k)], mass[getidx(i + 1, j, k)])); norm.y = 0.5*(CalcEpsilon(mark[getidx(i, j - 1, k)], rho[getidx(i, j - 1, k)], mass[getidx(i, j - 1, k)]) - 
CalcEpsilon(mark[getidx(i, j + 1, k)], rho[getidx(i, j + 1, k)], mass[getidx(i, j + 1, k)])); norm.z = 0.5*(CalcEpsilon(mark[getidx(i, j, k - 1)], rho[getidx(i, j, k - 1)], mass[getidx(i, j, k - 1)]) - CalcEpsilon(mark[getidx(i, j, k + 1)], rho[getidx(i, j, k + 1)], mass[getidx(i, j, k + 1)])); return norm; } __device__ inline float CalcMassExchange(char mark, char neighmark, float df_neigh, float dF_inv) { // Table 4.1 in Nils Thuerey's PhD thesis if (mark & TYPENOFLUIDNEIGH) { if (neighmark & TYPENOFLUIDNEIGH) return df_neigh - dF_inv; else // neighbor is standard cell or CT_NO_EMPTY_NEIGH return -dF_inv; } else if (mark & TYPENOEMPTYMEIGH) { if (neighmark & TYPENOEMPTYMEIGH) return df_neigh - dF_inv; else // neighbor is standard cell or CT_NO_FLUID_NEIGH return df_neigh; } else { // current cell is standard cell if(neighmark & TYPENOFLUIDNEIGH) return df_neigh; else if (neighmark & TYPENOEMPTYMEIGH) return -dF_inv; else// neighbor is standard cell return df_neigh - dF_inv; } } __device__ inline void LBMAverageSurrounding(charray mark, float * mass, float &rho, farray tmprho, farray df, float &ux, float &uy, float &uz,farray tmpux, farray tmpuy,farray tmpuz, int i, int j, int k,int Qm) { int q; int n = 0,ii,jj,kk,idxneigh; // set mass initially to zero mass[getidx(i,j,k)] = 0; rho = 0; ux = 0; uy = 0; uz = 0; float df_neigh[19] = { 0 }; for (q = 1; q < 19; q++)// omit zero vector { ii = i - LBM_dparam.vel_i[q].x; jj = j - LBM_dparam.vel_i[q].y; kk = k - LBM_dparam.vel_i[q].z; idxneigh = getidx(ii, jj, kk); // fluid or interface cells only if (mark[idxneigh] & (TYPEFLUID | TYPESOLID | TYPESURFACE)) { rho += tmprho(idxneigh); ux += tmpux(idxneigh); uy += tmpuy(idxneigh); uz += tmpuz(idxneigh); n++; } } if (n > 0) { rho /= n; ux /= n; uy /= n; uz /= n; } // calculate equilibrium distribution function for (Qm = 0; Qm < 19; Qm++) df[i,j,k,Qm] = LBMfeq(make_float3(ux, uy, uz), LBM_dparam.omega, rho, make_int3(LBM_dparam.vel_i[Qm].x, LBM_dparam.vel_i[Qm].y, LBM_dparam.vel_i[Qm].z), LBM_dparam.R*LBM_dparam.LBM_T0); } //*********************************LBM**************************************** void copyparamtoGPU(FlipConstant hparam) { checkCudaErrors(cudaMemcpyToSymbol(dparam, &hparam, sizeof(FlipConstant))); } //*********************************************************************** void copyLBMparamtoGPU(LBMConstant hparam) { checkCudaErrors(cudaMemcpyToSymbol(LBM_dparam, &hparam, sizeof(LBMConstant))); } //************************************************************ void LBMcopyparamtoGPU(FlipConstant hparam) { checkCudaErrors(cudaMemcpyToSymbol(dparam, &hparam, sizeof(FlipConstant))); } void copyNXNYNZtoGPU(int nx, int ny, int nz) { checkCudaErrors(cudaMemcpyToSymbol(NX, &nx, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(NY, &ny, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(NZ, &nz, sizeof(int))); } void copyNXNYNZtoGPU_MC(int nx, int ny, int nz) { checkCudaErrors(cudaMemcpyToSymbol(NXMC, &nx, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(NYMC, &ny, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(NZMC, &nz, sizeof(int))); } __device__ inline void getijk(int &i, int &j, int &k, int &idx) { i = idx / (NZ*NY); j = idx / NZ%NY; k = idx%NZ; } __device__ inline void getijkfrompos(int &i, int &j, int &k, float3 pos) { pos = (pos - dparam.gmin) / dparam.cellsize; i = (pos.x >= 0 && pos.x<NX) ? ((int)pos.x) : 0; j = (pos.y >= 0 && pos.y<NY) ? ((int)pos.y) : 0; k = (pos.z >= 0 && pos.z<NZ) ? 
((int)pos.z) : 0; } __device__ inline void getijkfrompos(int &i, int &j, int &k, float3 pos, int w, int h, int d, float dx) { pos = (pos - dparam.gmin) / dx; i = (pos.x >= 0 && pos.x<w) ? ((int)pos.x) : 0; j = (pos.y >= 0 && pos.y<h) ? ((int)pos.y) : 0; k = (pos.z >= 0 && pos.z<d) ? ((int)pos.z) : 0; } __device__ inline int getidx(int i, int j, int k, int w, int h, int d) { return (i*h*d + j*d + k); } __device__ inline float getRfromMass(float m) { return pow(m*0.75f / M_PI / dparam.waterrho, 0.333333); } __device__ inline float getMassfromR(float r) { return dparam.waterrho*M_PI*4.0 / 3 * r*r*r; } //计算散度 __global__ void cptdivergence(farray outdiv, farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); if (mark[idx] == TYPEFLUID) div = (ux(i + 1, j, k) - ux(i, j, k) + uy(i, j + 1, k) - uy(i, j, k) + uz(i, j, k + 1) - uz(i, j, k)) / h; outdiv[idx] = div; } } __device__ inline int clampidx(int i, int j, int k) { i = max(0, min(i, NX - 1)); j = max(0, min(j, NY - 1)); k = max(0, min(k, NZ - 1)); return (i*NZ*NY + j*NZ + k); } __device__ inline float trilinear(farray u, float x, float y, float z, int w, int h, int d) { x = fmaxf(0.0f, fminf(x, w)); y = fmaxf(0.0f, fminf(y, h)); z = fmaxf(0.0f, fminf(z, d)); int i = fminf(x, w - 2); int j = fminf(y, h - 2); int k = fminf(z, d - 2); return (k + 1 - z)*((j + 1 - y)*((i + 1 - x)*u(i, j, k) + (x - i)*u(i + 1, j, k)) + (y - j)*((i + 1 - x)*u(i, j + 1, k) + (x - i)*u(i + 1, j + 1, k))) + (z - k)*((j + 1 - y)*((i + 1 - x)*u(i, j, k + 1) + (x - i)*u(i + 1, j, k + 1)) + (y - j)*((i + 1 - x)*u(i, j + 1, k + 1) + (x - i)*u(i + 1, j + 1, k + 1))); } __device__ float3 getVectorFromGrid(float3 pos, farray phigrax, farray phigray, farray phigraz) { float3 res; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //注意:ux,uy,uz的存储方式比较特殊(staggered grid),三维线性插值也要比较小心 res.x = trilinear(phigrax, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); res.y = trilinear(phigray, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); res.z = trilinear(phigraz, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); return res; } __device__ float getScaleFromFrid(float3 pos, farray phi) { float res; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //注意:ux,uy,uz的存储方式比较特殊(staggered grid),三维线性插值也要比较小心 res = trilinear(phi, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); return res; } //Jacobi iteration: Ax=b //todo: check this function and maybe get another solver. __global__ void JacobiIter(farray outp, farray p, farray b, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float resp = 0, h = dparam.cellsize.x; float p1, p2, p3, p4, p5, p6; float p0 = p[idx]; int i, j, k; if (mark[idx] == TYPEFLUID) { getijk(i, j, k, idx); p1 = (mark(i + 1, j, k) == TYPEBOUNDARY) ? p0 : p(i + 1, j, k); p2 = (mark(i, j + 1, k) == TYPEBOUNDARY) ? p0 : p(i, j + 1, k); p3 = (mark(i, j, k + 1) == TYPEBOUNDARY) ? p0 : p(i, j, k + 1); p4 = (mark(i - 1, j, k) == TYPEBOUNDARY) ? p0 : p(i - 1, j, k); p5 = (mark(i, j - 1, k) == TYPEBOUNDARY) ? p0 : p(i, j - 1, k); p6 = (mark(i, j, k - 1) == TYPEBOUNDARY) ? 
p0 : p(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 - h*h*b(i, j, k)) / 6.0f; } outp[idx] = resp; } } __global__ void setPressBoundary(farray press) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == 0) press[idx] = press(i + 1, j, k); if (j == 0) press[idx] = press(i, j + 1, k); if (k == 0) press[idx] = press(i, j, k + 1); if (i == NX - 1) press[idx] = press(i - 1, j, k); if (j == NY - 1) press[idx] = press(i, j - 1, k); if (k == NZ - 1) press[idx] = press(i, j, k - 1); } } //压强与速度的计算 __global__ void subGradPress(farray p, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; if (idx<dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k)) / h; } if (idx<dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k)) / h; } if (idx<dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1)) / h; } } __device__ float3 getParticleVelFromGrid(float3 pos, farray ux, farray uy, farray uz) { float3 vel; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //注意:ux,uy,uz的存储方式比较特殊(staggered grid),三维线性插值也要比较小心 vel.x = trilinear(ux, x, y - 0.5f, z - 0.5f, NX + 1, NY, NZ); vel.y = trilinear(uy, x - 0.5f, y, z - 0.5f, NX, NY + 1, NZ); vel.z = trilinear(uz, x - 0.5f, y - 0.5f, z, NX, NY, NZ + 1); return vel; } __global__ void mapvelg2p_flip(float3 *ppos, float3 *vel, char* parflag, int pnum, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float3 gvel = getParticleVelFromGrid(ipos, ux, uy, uz); vel[idx] += gvel; } } __device__ inline float sharp_kernel(float r2, float h) { return fmax(h*h / fmax(r2, 0.0001f) - 1.0f, 0.0f); } __global__ void mapvelp2g_slow(float3 *pos, float3 *vel, int pnum, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float w, weight, RE = 1.4, dis2, usum; float3 gpos; float scale = 1 / dparam.cellsize.x; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = 0; getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int p = 0; p<pnum; p++) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].x; } usum = (weight>0) ? (usum / weight) : 0.0f; ux(i, j, k) = usum; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = 0; getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int p = 0; p<pnum; p++) { dis2 = dot((pos[p] * scale) - gpos, (pos[p] * scale) - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].y; } usum = (weight>0) ? (usum / weight) : 0.0f; uy(i, j, k) = usum; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = 0; getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int p = 0; p<pnum; p++) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].z; } usum = (weight>0.00001) ? 
(usum / weight) : 0.0f; uz(i, j, k) = usum; } } __device__ inline bool verifycellidx(int i, int j, int k) { if (i<0 || i>NX - 1 || j<0 || j>NY - 1 || k<0 || k>NZ - 1) return false; return true; } __device__ inline bool verifycellidx(int i, int j, int k, int w, int h, int d) { if (i<0 || i>w - 1 || j<0 || j>h - 1 || k<0 || k>d - 1) return false; return true; } __global__ void addgravityforce_k(float3 *vel, char* parflag, int pnum, float dt) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEFLUID ) vel[idx] += dt*dparam.gravity; if ( parflag[idx] == TYPESOLID) vel[idx] += dt*dparam.gravity ; } } __global__ void addbuoyancyforce_k(float dheight, float3 *pos, float3 *vel, char* parflag, int pnum, float dt) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEAIR) vel[idx] -= dt*dparam.gravity * 1.1f; //todo:这里的浮力可以小一些,让气泡上的慢一些,视频快一些,水看起来就不太粘了。 else if (parflag[idx] == TYPEAIRSOLO) vel[idx] -= dt*dparam.gravity * 1.1f; else if (parflag[idx] == TYPESOLID) vel[idx] -= dt*dparam.gravity * 0.5f; // else if(parflag[idx] == TYPESOLID && pos[idx].z <= dheight) // 液面下固体粒子受浮力 // vel[idx] -= dt*dparam.gravity * 0.2f; } } __global__ void addbuoyancyforce_vel(float velMax, float3 *pos, float3 *vel, char* parflag, int pnum, float dt, float buoyanceRateAir, float buoyanceRateSolo) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { float rate = fmax(velMax - vel[idx].z, 0.0f) / velMax; if (parflag[idx] == TYPEAIR) vel[idx].z -= dt*dparam.gravity.z * rate * buoyanceRateAir; //todo:这里的浮力可以小一些,让气泡上的慢一些,视频快一些,水看起来就不太粘了。 else if (parflag[idx] == TYPEAIRSOLO) vel[idx].z -= dt*dparam.gravity.z *rate* buoyanceRateSolo; else if (parflag[idx] == TYPESOLID) vel[idx].z -= dt*dparam.gravity.z * .55f;//0.55f; //else if(parflag[idx] == TYPESOLID && pos[idx].z <= dheight) // 液面下固体粒子受浮力 // vel[idx] -= dt*dparam.gravity * 0.2f; } } __global__ void advectparticle(float3 *ppos, float3 *pvel, int pnum, farray ux, farray uy, farray uz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); //pos-->grid xyz float3 gvel; gvel = getParticleVelFromGrid(ipos, ux, uy, uz); //vel[idx] += dt*dparam.gravity; ipos += gvel*dt; if (velmode == CIP) ivel = gvel; else if (velmode == FLIP) ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //check boundary ipos.x = fmax(tmin.x, fmin(tmax.x, ipos.x)); ipos.y = fmax(tmin.y, fmin(tmax.y, ipos.y)); ipos.z = fmax(tmin.z, ipos.z); if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back pvel[idx] = ivel; ppos[idx] = ipos; } } __global__ void advectparticle_RK2(float3 *ppos, float3 *pvel, int pnum, farray ux, farray uy, farray uz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); //pos-->grid xyz float3 gvel; gvel = getParticleVelFromGrid(ipos, ux, uy, uz); if (velmode == CIP) ivel = gvel; else if (velmode == FLIP) ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //mid point: x(n+1/2) = x(n) 
+ 0.5*dt*u(xn) float3 midpoint = ipos + gvel * dt * 0.5; float3 gvelmidpoint = getParticleVelFromGrid(midpoint, ux, uy, uz); // x(n+1) = x(n) + dt*u(x+1/2) ipos += gvelmidpoint * dt; //check boundary if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back if (parflag[idx] != TYPESOLID) { pvel[idx] = ivel; ppos[idx] = ipos; } else pvel[idx] = ivel; } } __global__ void flipAirVacuum(charray mark) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == TYPEVACUUM) mark[idx] = TYPEAIR; } } __global__ void markair(charray mark) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { mark[idx] = TYPEAIR; } } __global__ void markforsmoke(charray mark, farray spraydense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { /* if(spraydense[idx]>0 )*/ mark[idx] = TYPEFLUID; } } __global__ void markfluid(charray mark, float3 *pos, char *parflag, int pnum) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { int i, j, k; //todo: ???? Should spray particle count??? or should we have a more accurate mark method. if( parflag[idx]==TYPEFLUID) { getijkfrompos(i, j, k, pos[idx]); mark(i, j, k) = TYPEFLUID; //应该是不需要原子操作的,重复写不会有问题 } } } //判断一下格子里含有的fluid particle的数量,再决定格子的属性 __global__ void markfluid_dense(charray mark, float *parmass, char *parflag, int pnum, uint *gridstart, uint *gridend, int fluidParCntPerGridThres) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int cntfluidsolid = 0, cntair = 0; uint start = gridstart[idx]; uint end = gridend[idx]; if (start != CELL_UNDEF) { for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) cntfluidsolid++; else if (parflag[p] == TYPEAIR) cntair++; } } if (cntfluidsolid == 0 && cntair == 0) mark[idx] = TYPEVACUUM; else if (cntfluidsolid>cntair) mark[idx] = TYPEFLUID; else mark[idx] = TYPEAIR; } } //************************LBM***************************** //判断一下格子里含有的fluid particle的数量,再决定格子的属性 __global__ void markfluid_LBM_Init(charray mark, float *parmass, char *parflag, int pnum, uint *gridstart, uint *gridend, int Thres)//多出surface 格子 { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { int cntfluidsolid = 0, cntair = 0; uint start = gridstart[idx]; uint end = gridend[idx]; if (start != CELL_UNDEF) { for (uint p = start; p < end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) cntfluidsolid++; else if (parflag[p] == TYPEAIR) //for now (181210), there is no air cell considered in LBM framework cntair++; } } if (cntfluidsolid == 0 ) mark[idx] = TYPEVACUUM; else if (cntfluidsolid >= Thres) // initial particle number per cell is 8 mark[idx] = TYPEFLUID; else mark[idx] = TYPESURFACE; // particle number [1,7] } } __global__ void markfluid_LBMdense(charray mark, float *parmass, char *parflag, int pnum, uint *gridstart, uint *gridend, int fluidParCntPerGridThres) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) //int j = 0; //for(int idx=0; idx<dparam.gnum; idx++) { int cntfluidsolid = 0, cntair = 0; uint start = gridstart[idx]; uint end = gridend[idx]; if (start != CELL_UNDEF) { for (uint p = start; p < end; ++p) { if 
(parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) cntfluidsolid++; else if (parflag[p] == TYPEAIR) cntair++; } } if (cntfluidsolid == 0 && cntair == 0) { mark[idx] = TYPEVACUUM; } else if (cntfluidsolid > cntair) { mark[idx] = TYPEFLUID; // printf("%d ", cntfluidsolid); } else mark[idx] = TYPEAIR; } } __global__ void markBoundaryCell(charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 || k == NZ - 1) mark[idx] = TYPEBOUNDARY; } } __global__ void setgridcolor_k(float* color, ECOLORMODE mode, farray p, farray ux, farray uy, farray uz, farray div, farray phi, charray mark, farray ls, farray tp, float sigma, float temperatureMax, float temperatureMin) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float3 rescolor = make_float3(0.0); int cellindex = NY / 2; if (mode == COLOR_PRESS) { if (j != cellindex || p[idx] == 0) rescolor = make_float3(0, 0, 1); else if (p[idx]>0) rescolor = make_float3(0, 1, 0); else if (p[idx]<0) rescolor = make_float3(1, 0, 0); //rescolor = mapColorBlue2Red( 30000*abs(p[idx]) ); } else if (mode == COLOR_UX) { if (j != cellindex || ux(i + 1, j, k) + ux(i, j, k)<0) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5*abs(ux(i + 1, j, k) + ux(i, j, k))); } else if (mode == COLOR_UY) { if (j != cellindex || uy(i, j + 1, k) + uy(i, j, k)<0) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5*abs(uy(i, j + 1, k) + uy(i, j, k))); } else if (mode == COLOR_UZ) { if (j != cellindex/*||uz(i,j,k+1)+uz(i,j,k)<0*/) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(5 * abs(uz(i, j, k))); } else if (mode == COLOR_DIV) { if (j != cellindex || div[idx] == 0) rescolor = make_float3(0, 0, 1); else if (div[idx]>0) rescolor = make_float3(0, 1, 0); else if (div[idx]<0) rescolor = make_float3(1, 1, 0); } else if (mode == COLOR_PHI) { if (phi[idx]>3 * NX - 1 || j != cellindex) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5f + phi[idx]); } else if (mode == COLOR_MARK) { if (j != cellindex) rescolor = make_float3(0, 0, 1); else { if (mark[idx] == TYPEAIR) rescolor = make_float3(0, 1, 0); else if (mark[idx] == TYPEFLUID) rescolor = make_float3(1, 0, 0); else if (mark[idx] == TYPEVACUUM) rescolor = make_float3(1, 1, 0); else if (mark[idx] == TYPEBOUNDARY) rescolor = make_float3(0, 1, 1); else rescolor = make_float3(0, 0, 1); //rescolor = mapColorBlue2Red( (int)(mark[idx])+1.0f ) ; } } else if (mode == COLOR_LS) { if (j == cellindex && ls[idx]>0) rescolor = mapColorBlue2Red(abs(ls[idx] / dparam.cellsize.x)); else rescolor = make_float3(0, 0, 1); } else if (mode == COLOR_TP) { if (j != cellindex || i == 0 || i == NX - 1 || k == 0 || k == NZ - 1) rescolor = make_float3(0, 0, 1); else // rescolor = mapColorBlue2Red( abs(tp[idx]*dparam.cellsize.x*5/sigma) ); //rescolor = mapColorBlue2Red( abs(tp[idx]-353)/5.0f ); rescolor = mapColorBlue2Red((tp[idx] - temperatureMin) / (temperatureMax - temperatureMin)*6.0f); } color[idx * 3] = rescolor.x; color[idx * 3 + 1] = rescolor.y; color[idx * 3 + 2] = rescolor.z; } } __host__ __device__ inline float3 mapColorBlue2Red(float v) { float3 color; if (v<0) return make_float3(0.0f, 0.0f, 1.0f); int ic = (int)v; float f = v - ic; switch (ic) { case 0: { color.x = 0; color.y = f / 2; color.z = 1; } break; case 1: { color.x = 0; color.y = f / 2 + 0.5f; color.z = 1; } break; case 2: { 
color.x = f / 2; color.y = 1; color.z = 1 - f / 2; } break; case 3: { color.x = f / 2 + 0.5f; color.y = 1; color.z = 0.5f - f / 2; } break; case 4: { color.x = 1; color.y = 1.0f - f / 2; color.z = 0; } break; case 5: { color.x = 1; color.y = 0.5f - f / 2; color.z = 0; } break; default: { color.x = 1; color.y = 0; color.z = 0; } break; } return color; } __global__ void initphi(farray phi, charray mark, char typeflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == typeflag) phi[idx] = -0.5; else phi[idx] = NX * 3; } } __global__ void initSolidPhi(farray phi, uint *gridstart, uint *gridend, char *pflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { bool flag = false; uint start = gridstart[idx]; if (start != CELL_UNDEF) { for (; start<gridend[idx]; start++) { if (pflag[start] == TYPESOLID) flag = true; } } if (flag) phi[idx] = -0.5f; else phi[idx] = 3 * NX; } } __device__ void solvedistance(float a, float b, float c, float &x) { float d = fmin(a, fmin(b, c)) + 1; if (d>fmax(a, fmax(b, c))) { d = (a + b + c + sqrt(3 - (a - b)*(a - b) - (a - c)*(a - c) - (b - c)*(b - c))) / 3; } if (d<x) x = d; } __global__ void sweepphi(farray phi) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float resphi = phi[idx]; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (verifycellidx(i + di, j, k) && verifycellidx(i, j + dj, k) && verifycellidx(i, j, k + dk)) solvedistance(phi(i + di, j, k), phi(i, j + dj, k), phi(i, j, k + dk), resphi); } phi[idx] = resphi; } } __global__ void sweepphibytype(farray phi, charray mark, char typeflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == typeflag) return; int i, j, k; getijk(i, j, k, idx); float resphi = phi[idx]; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (verifycellidx(i + di, j, k) && verifycellidx(i, j + dj, k) && verifycellidx(i, j, k + dk)) solvedistance(phi(i + di, j, k), phi(i, j + dj, k), phi(i, j, k + dk), resphi); } phi[idx] = resphi; } } __global__ void sweepu(farray outux, farray outuy, farray outuz, farray ux, farray uy, farray uz, farray phi, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; float wx, wy, wz, wsum; //三个方向上的权重 if (idx < dparam.gvnum.x) { //copy outux[idx] = ux[idx]; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>1 && i<NX - 1 /*&& j>0 && j<N-1 && k>0 && k<N-1*/) { if ( (mark(i, j, k) == TYPEBOUNDARY && mark(i - 1, j, k) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (j + dj<0 || j + dj>NY - 1 || k + dk<0 || k + dk >NZ - 1) continue; wx = -di*(phi(i, j, k) - phi(i - 1, j, k)); if (wx<0) continue; wy = (phi(i, j, k) + phi(i - 1, j, k) - phi(i, j + dj, k) - phi(i - 1, j + dj, k))*0.5f; if (wy<0) continue; wz = (phi(i, j, k) + phi(i - 1, j, k) - phi(i, j, k + dk) - phi(i - 1, j, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outux(i, j, k) = wx*ux(i + di, j, k) + wy* ux(i, j + dj, k) + wz* ux(i, j, k + dk); } } } if (idx < dparam.gvnum.y) { //copy outuy[idx] = uy[idx]; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if ( /*i>0 && i<N-1 &&*/ j>1 && j<NY - 1 /*&& k>0 && k<N-1*/) { if ( (mark(i, j, k) == 
TYPEBOUNDARY && mark(i, j - 1, k) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di>NX - 1 || k + dk<0 || k + dk >NZ - 1) continue; wy = -dj*(phi(i, j, k) - phi(i, j - 1, k)); if (wy<0) continue; wx = (phi(i, j, k) + phi(i, j - 1, k) - phi(i + di, j, k) - phi(i + di, j - 1, k))*0.5f; if (wx<0) continue; wz = (phi(i, j, k) + phi(i, j - 1, k) - phi(i, j, k + dk) - phi(i, j - 1, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuy(i, j, k) = wx*uy(i + di, j, k) + wy* uy(i, j + dj, k) + wz* uy(i, j, k + dk); } } } if (idx < dparam.gvnum.z) { //copy outuz[idx] = uz[idx]; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if ( /*i>0 && i<N-1 && j>0 && j<N-1 &&*/ k>1 && k<NZ - 1) { if ( (mark(i, j, k) == TYPEBOUNDARY && mark(i, j, k - 1) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di >NX - 1 || j + dj<0 || j + dj>NY - 1) continue; wz = -dk*(phi(i, j, k) - phi(i, j, k - 1)); if (wz<0) continue; wy = (phi(i, j, k) + phi(i, j, k - 1) - phi(i, j + dj, k) - phi(i, j + dj, k - 1))*0.5f; if (wy<0) continue; wx = (phi(i, j, k) + phi(i, j, k - 1) - phi(i + di, j, k) - phi(i + di, j, k - 1))*0.5f; if (wx<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuz(i, j, k) = wx*uz(i + di, j, k) + wy* uz(i, j + dj, k) + wz* uz(i, j, k + dk); } } } } __global__ void setSmokeBoundaryU_k(farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; if (idx < dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); { if (i <= 1 || i >= ux.xn - 2) ux(i, j, k) = 0.0f; else if (j == 0) ux(i, j, k) = ux(i, j + 1, k); else if (j == NY - 1) ux(i, j, k) = ux(i, j - 1, k); else if (k == 0) ux(i, j, k) = ux(i, j, k + 1); else if (k == NZ - 1) ux(i, j, k) = ux(i, j, k - 1); else if (i>1 && i<NX - 1 && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i - 1, j, k) == TYPEBOUNDARY))) ux(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); { if (j <= 1 || j >= uy.yn - 2) uy(i, j, k) = 0.0f; else if (i == 0) uy(i, j, k) = uy(i + 1, j, k); else if (i == NX - 1) uy(i, j, k) = uy(i - 1, j, k); else if (k == 0) uy(i, j, k) = uy(i, j, k + 1); else if (k == NZ - 1) uy(i, j, k) = uy(i, j, k - 1); else if (j>0 && j<NY && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j - 1, k) == TYPEBOUNDARY))) uy(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); { if (k <= 1 || k >= uz.zn - 2) uz(i, j, k) = 0.0f; else if (i == 0) uz(i, j, k) = uz(i + 1, j, k); else if (i == NX - 1) uz(i, j, k) = uz(i - 1, j, k); else if (j == 0) uz(i, j, k) = uz(i, j + 1, k); else if (j == NY - 1) uz(i, j, k) = uz(i, j - 1, k); else if (k>0 && k<NZ && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j, k - 1) == TYPEBOUNDARY))) uz(i, j, k) = 0.0f; } } } __global__ void setWaterBoundaryU_k(farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; if (idx < dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); { if (i <= 1 || i >= ux.xn - 2) ux(i, j, k) = 0.0f; else if (i>1 && i<NX - 1 && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i - 1, j, k) == TYPEBOUNDARY))) ux(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.y) { //uy getijk(i, j, k, 
idx, NX, NY + 1, NZ); { if (j <= 1 || j >= uy.yn - 2) uy(i, j, k) = 0.0f; else if (j>0 && j<NY && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j - 1, k) == TYPEBOUNDARY))) uy(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); { if (k <= 1 || k >= uz.zn - 1) //特殊处理ceiling uz(i, j, k) = 0.0f; else if (k == uz.zn - 2) //ceiling. uz(i, j, k) = (uz(i, j, k - 1)<0) ? (uz(i, j, k - 1)) : 0; else if (k>0 && k<NZ && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j, k - 1) == TYPEBOUNDARY))) uz(i, j, k) = 0.0f; } } } __global__ void computeDeltaU(farray ux, farray uy, farray uz, farray uxold, farray uyold, farray uzold) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) uxold[idx] = ux[idx] - uxold[idx]; if (idx < dparam.gvnum.y) uyold[idx] = uy[idx] - uyold[idx]; if (idx < dparam.gvnum.z) uzold[idx] = uz[idx] - uzold[idx]; } // From CUDA SDK: calculate grid hash value for each particle __global__ void calcHashD(uint* gridParticleHash, // output uint* gridParticleIndex, // output float3* pos, // input: positions uint numParticles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 p = pos[index]; // get address in grid int i, j, k; getijkfrompos(i, j, k, p); int gridindex = getidx(i, j, k); // store grid hash and particle index gridParticleHash[index] = gridindex; gridParticleIndex[index] = index; } // From CUDA SDK: calculate grid hash value for each particle __global__ void calcHashD_MC(uint* gridParticleHash, // output uint* gridParticleIndex, // output float3* pos, // input: positions uint numParticles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 p = pos[index]; // get address in grid int i, j, k; getijkfrompos(i, j, k, p, NXMC, NYMC, NZMC, dparam.cellsize.x / NXMC*NX); int gridindex = getidx(i, j, k, NXMC, NYMC, NZMC); // store grid hash and particle index gridParticleHash[index] = gridindex; gridParticleIndex[index] = index; } // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStartD(uint* cellStart, // output: cell start index uint* cellEnd, // output: cell end index float3* sortedPos, // output: sorted positions float3* sortedVel, // output: sorted velocities char* sortedflag, float* sortedmass, float* sortedTemperature, float* sortedheat, float* sortedsolubility, float* sortedgascontain, uint * gridParticleHash, // input: sorted grid hashes uint * gridParticleIndex,// input: sorted particle indices float3* oldPos, // input: sorted position array float3* oldVel, // input: sorted velocity array char* oldflag, float* oldmass, float* oldtemperature, float* oldheat, float* oldsolubility, float* oldgascontain, uint numParticles) { extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. 
of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index - 1]; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. // As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; //no. hash 's grid cellstart is index if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleIndex[index]; float3 pos = oldPos[sortedIndex]; // macro does either global read or texture fetch float3 vel = oldVel[sortedIndex]; // see particles_kernel.cuh sortedPos[index] = pos; sortedVel[index] = vel; sortedflag[index] = oldflag[sortedIndex]; sortedmass[index] = oldmass[sortedIndex]; sortedTemperature[index] = oldtemperature[sortedIndex]; sortedheat[index] = oldheat[sortedIndex]; sortedsolubility[index] = oldsolubility[sortedIndex]; sortedgascontain[index] = oldgascontain[sortedIndex]; } } __global__ void advectux(farray outux, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, ux.xn, ux.yn, ux.zn); float3 pos = make_float3(i, j + 0.5, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX || j == NY - 1 || k == NZ - 1) outux[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = ux[idx]; vel.y = (uy(i - 1, j, k) + uy(i - 1, j + 1, k) + uy(i, j, k) + uy(i, j + 1, k))*0.25f; vel.z = (uz(i - 1, j, k) + uz(i - 1, j, k + 1) + uz(i, j, k) + uz(i, j, k + 1))*0.25f; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(ux, oldpos.x, oldpos.y - 0.5f, oldpos.z - 0.5f, ux.xn, ux.yn, ux.zn); outux[idx] = oldu * velocitydissipation; } } } __global__ void advectuy(farray outuy, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.y) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, uy.xn, uy.yn, uy.zn); float3 pos = make_float3(i + 0.5, j, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY || k == NZ - 1) outuy[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j - 1, k) + ux(i + 1, j - 1, k) + ux(i, j, k) + ux(i + 1, j, k))*0.25f; vel.y = uy[idx]; vel.z = (uz(i, j - 1, k) + uz(i, j - 1, k + 1) + uz(i, j, k) + uz(i, j, k + 1))*0.25f; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. 
//get ux float oldu = trilinear(uy, oldpos.x - 0.5f, oldpos.y, oldpos.z - 0.5f, uy.xn, uy.yn, uy.zn); outuy[idx] = oldu * velocitydissipation; } } } __global__ void advectuz(farray outuz, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, uz.xn, uz.yn, uz.zn); float3 pos = make_float3(i + 0.5, j + 0.5, k); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY - 1 || k == NZ) outuz[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j, k - 1) + ux(i + 1, j, k - 1) + ux(i, j, k) + ux(i + 1, j, k))*0.25f; vel.y = (uy(i, j, k - 1) + uy(i, j + 1, k - 1) + uy(i, j, k) + uy(i, j + 1, k))*0.25f; vel.z = uz[idx]; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(uz, oldpos.x - 0.5f, oldpos.y - 0.5f, oldpos.z, uz.xn, uz.yn, uz.zn); //float oldu = -dparam.dt*3.8f; outuz[idx] = oldu * velocitydissipation; } } } __global__ void advectscaler(farray outscalar, farray scalar, farray ux, farray uy, farray uz, float densedissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { //get pos of ux point int i, j, k; getijk(i, j, k, idx); float3 pos = make_float3(i + 0.5, j + 0.5, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY - 1 || k == NZ - 1) outscalar[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j, k) + ux(i + 1, j, k))*0.5f; vel.y = (uy(i, j, k) + uy(i, j + 1, k))*0.5f; vel.z = (uz(i, j, k) + uz(i, j, k + 1))*0.5f; //enforce wind as an external velocity field. vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. 
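			// Semi-Lagrangian step: the scalar stored at this cell center is taken from the
			// backtraced position oldpos = x - dt*u(x) (expressed in grid units) and then
			// damped, i.e. s_new(x) = densedissipation * s_old(x - dt*u(x)).
			// The -0.5f offsets below account for scalars living at cell centers while
			// trilinear() indexes the field on integer lattice coordinates.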
//get ux float olds = trilinear(scalar, oldpos.x - 0.5f, oldpos.y - 0.5f, oldpos.z - 0.5f, NX, NY, NZ); outscalar[idx] = olds * densedissipation; } } } __global__ void setsmokedense(farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { int i, j, k; getijk(i, j, k, idx, dense.xn, dense.yn, dense.zn); if (i>28 && i<36 && j>28 && j<36 && k<6) dense[idx] = dparam.m0*6.0f; } } __global__ void setsmokevel(farray uz, farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { int i, j, k; getijk(i, j, k, idx, uz.xn, uz.yn, uz.zn); // if( i>20 && i<40 && j>20 && j<40 && k<10 ) // uz[idx] = 4.0f; // if( k>1 && k<NZ-1 ) // if( dense(i,j,k-1)>0 ) // uz[idx] = 4.0f; if (k>1 && k<NZ - 1) { float alpha = 1000.0f; uz(i, j, k) += alpha * dense(i, j, k - 1); } } } __global__ void setsmokevel_nozzle(farray ux, farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) { int i, j, k; getijk(i, j, k, idx, ux.xn, ux.yn, ux.zn); // if( i>20 && i<40 && j>20 && j<40 && k<10 ) // uz[idx] = 4.0f; //float alpha = 10000.0f; if (i>1 && i<NX - 1) if (dense(i - 1, j, k)>0) ux[idx] = 8.0f; //uz(i,j,k) += alpha * dense(i,j,k-1); } } surface<void, cudaSurfaceType3D> surfaceWrite; __global__ void writedens2surface_k(farray dens) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); // float4 idens = make_float4( 0.0f ); // if(i>10&&i<50 &&j>10&&j<50&&k>10&&k<50 ) // idens = make_float4( 1.0f ); float4 idens = make_float4(dens[idx] * 10000); surf3Dwrite(idens, surfaceWrite, i*sizeof(float4), j, k); //why *sizeof(float4)? } } void writedens2surface(cudaArray* cudaarray, int blocknum, int threadnum, farray dense) { cudaBindSurfaceToArray(surfaceWrite, cudaarray); //kernel writedens2surface_k << <blocknum, threadnum >> >(dense); } __device__ float smooth_kernel(float r2, float h) { return fmax(1.0f - r2 / (h*h), 0.0f); } __device__ float3 sumcellspring(float3 ipos, float3 *pos, float* pmass, char* parflag, uint *gridstart, uint *gridend, int gidx, float idiameter) { if (gridstart[gidx] == CELL_UNDEF) return make_float3(0.0f); uint start = gridstart[gidx]; uint end = gridend[gidx]; float dist, w; float3 spring = make_float3(0.0f); float r = 0; for (uint p = start; p<end; ++p) { //if( parflag[p]!=TYPESOLID ) //solid粒子也应该对别的粒子产生作用才对 { dist = length(pos[p] - ipos); r = idiameter;//+getRfromMass( pmass[p] ); w = pmass[p] * smooth_kernel(dist*dist, r); if (dist>0.1f*idiameter) //太近会产生非常大的弹力 spring += w*(ipos - pos[p]) / dist; } } return spring; } __global__ void correctparticlepos(float3* outpos, float3* ppos, float *pmass, char* parflag, int pnum, uint* gridstart, uint *gridend, float correctionspring, float correctionradius, float3 *pepos, float *peradius, int penum) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPESOLID/* || parflag[idx]==TYPEAIR*/ || parflag[idx] == TYPEAIRSOLO) { outpos[idx] = ppos[idx]; return; } float3 ipos = ppos[idx]; int i, j, k; getijkfrompos(i, j, k, ipos); float3 spring = make_float3(0.0f); float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float re = correctionradius*dparam.cellsize.x; // float re= getRfromMass( pmass[idx] ); int lv = 1; // float idiameter = 2*pow(0.75*pmass[idx]/dparam.waterrho/M_PI, 1.0/3); //注意,应该比实际的半径大,相当于SPH中的核函数半径 for (int 
di = -lv; di <= lv; di++) for (int dj = -lv; dj <= lv; dj++) for (int dk = -lv; dk <= lv; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { spring += sumcellspring(ipos, ppos, pmass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk), re); } } // //增加empty气泡的作用,遍历所有的empty粒子 // float w, dist; // for( int p=0; p<penum; p++ ) // { // if( peradius[p]>0.5f*dparam.cellsize.x ) //太小不处理 // { // dist=length(pepos[p]-ipos); // w = pmass[idx]*smooth_kernel(dist*dist, peradius[p]); //质量用被弹开粒子的质量 // if( dist>0.1f*peradius[p] ) //太近会产生非常大的弹力 // spring += w*(ipos-pepos[p]) / dist; // } // } spring *= correctionspring*re; if (length(dparam.dt*spring)>0.3f*dparam.cellsize.x) ipos += dparam.cellsize.x * 0.3f * spring / length(spring); else ipos += dparam.dt*spring; ipos.x = fmax(tmin.x, fmin(tmax.x, ipos.x)); ipos.y = fmax(tmin.y, fmin(tmax.y, ipos.y)); ipos.z = fmax(tmin.z, fmin(tmax.z, ipos.z)); outpos[idx] = ipos; } } __device__ void sumcelldens(float &phi, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) { dis = length(pos[p] - gpos); if (phi>dis) phi = dis; } } } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012][TVCG]Preserving Fluid Sheets with Adaptively Sampled Anisotropic Particles __global__ void genWaterDensfield(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NX + 1)*(NY + 1)*(NZ + 1)) { float h = dparam.cellsize.x; float phi = 8 * fMCDensity*h; //from flip3d_vs //get position int i, j, k; getijk(i, j, k, idx, NX + 1, NY + 1, NZ + 1); float3 p = make_float3(i, j, k)*h; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk)) { sumcelldens(phi, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); } } phi = fMCDensity*h - phi; if (i*j*k == 0 || i == NX || j == NY || k == NZ) phi = fmin(phi, -0.1f); outdens[idx] = phi; } } __device__ float3 sumcelldens2(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, char MCParType) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == MCParType) { dis = length(pos[p] - gpos); if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012]【CGF】Parallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield2(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, char MCParType) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. 
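		// Surface field in the style of the cited parallel surface reconstruction paper:
		// for this MC node p, the loops below accumulate a kernel-weighted average particle
		// position  x_bar = sum_j w_j x_j / sum_j w_j  with  w_j = (R^2 - |x_j - p|^2)^3
		// over particles of type MCParType within R = rate*h, and the implicit value is
		// phi(p) = r - |p - x_bar|  (positive inside the surface, negative outside).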
float r = 1.0f*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; //网格的位置 float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens2(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, MCParType); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens_Gas(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, SCENE scene) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR || (parflag[p] == TYPEAIRSOLO && scene != SCENE_INTERACTION)) { dis = length(pos[p] - gpos); if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012]【CGF】Parallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield_Gas(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. float r = 0.8f*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; //网格的位置 float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens_Gas(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, scene); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens_liquidAndGas(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, float sradiusInv, float radius, float racc,float wacc, float3 pacc) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; //float r = R / 2.; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR || parflag[p] == TYPEAIRSOLO || parflag[p] == TYPEFLUID) { dis = length(pos[p] - gpos); // { // float s = dot(pos[p] - gpos, pos[p] - gpos)*sradiusInv;//mantaflow // w = max(0., (1. 
- s)); // wacc += w; // racc += radius * w; // pacc += pos[p] * w; // // } if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012]【CGF】Parallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield_liquidAndGas(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. //float r = 2.5f*sqrt(3.)*1.01*0.5*h; //mantaFlow flip03_gen float r = 0.75*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); //mantaflow 里面的算法 //float racc, wacc; //float3 pacc = make_float3(0.); // float phiv = r; // sradiusInv = 1. / (4. *r * r); // int radius = int(1. * r) + 1; // float3 gridPos = make_float3(i + 0.5, j + 0.5, k + 0.5)* h; float3 p = make_float3(i, j, k)* h; //网格的位置 float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens_liquidAndGas(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, sradiusInv, r,racc,wacc,pacc); // printf("%f !!!!", pacc.x); ///////////////////////// // racc /= wacc; // pacc /= wacc; // phiv = fabs(length(gridPos-pacc)); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. // phi = phiv; //mantaflow if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens3(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float h, char MCParType) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == MCParType) { //GY:参照论文【CFG2012】Parallel Surface Reconstruction for Particle-Based Fluids // [2007CAVW]A Unified Particle Model for Fluid-Solid Interactions // 【2012 VRIPHYS】An Efficient Surface Reconstruction Pipeline for Particle-Based Fluids dis = length(pos[p] - gpos); //v-xi if (dis<h) { // w = h*h -dis*dis; //之前的代码 // w = w*w*w; // res += pos[p] * w; // wsum += w; w = dis / (4 * h); // |v-xi|/R 见[2007 CAVW]下同 R=2h=4r w = 1 - w*w; // 1-s~2 w = max(w*w*w, 0.0); // k(s) res += pos[p] * w; wsum += w; } } } return res; } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012]【VRIPHYS】An Efficient Surface Reconstruction Pipeline for Particle-Based Fluids __global__ void genWaterDensfield_GY(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, char MCParType, float3 centertmp) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. 
float r = 0.75f*h; float thigh = 0.51; float tlow = 0.49; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; //网格的位置 float3 center = make_float3(0.0f); float wsum = 0.0f; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens3(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h, MCParType); } } if (wsum>0) { center /= wsum; //~v float3 delta = center - centertmp; float Ev = max(delta.x, max(delta.y, delta.z)) / (4 * h); // // float Ev = 3.8; centertmp = center; // centertmp:存储的是上一次的center 求Ev的delta用 float gamma = (thigh - Ev) / (thigh - tlow); float f = (Ev<tlow) ? 1 : gamma*gamma*gamma - 3 * gamma*gamma + 3 * gamma; // phi = r - length( p - center ); phi = (length(p - center) - r*f); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = fmin(phi, -10.0f); outdens[idx] = phi; } } __global__ void markSolid_sphere(float3 spherepos, float sphereradius, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if ((i>NX/2-2) &&i<2.5*NX/3 && j>3.5*NY/9 && j< 6*NY/9 && k<NZ/5) mark[idx] = TYPEBOUNDARY; } } __global__ void markSolid_waterfall(int3 minpos, int3 maxpos, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int x, y, z; getijk(x, y, z, idx); if (x <= maxpos.x && (y >= maxpos.y || y <= minpos.y) && z <= maxpos.z) mark[idx] = TYPEBOUNDARY; else if (x <= maxpos.x && (y>minpos.y || y<maxpos.y) && z <= minpos.z) mark[idx] = TYPEBOUNDARY; } } //a trick part. __global__ void markSolid_waterfall_liquid(int3 minpos, int3 maxpos, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int x, y, z; getijk(x, y, z, idx); if (x <= maxpos.x && (y >= maxpos.y || y <= minpos.y) && z <= maxpos.z*0.7f) mark[idx] = TYPEBOUNDARY; else if (x <= maxpos.x && (y>minpos.y || y<maxpos.y) && z <= minpos.z*0.7f) mark[idx] = TYPEBOUNDARY; } } //a trick part. 
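// The node-density splat kernels earlier in this file (e.g. sumcelldens_Gas, sumcelldens_liquidAndGas)
// all weight a particle at distance dis from the MC node by w = (R*R - dis*dis)^3 for dis < R and 0
// otherwise. A minimal standalone sketch of that weight follows; the function name mcPolyWeightSketch
// is illustrative and not part of the original code.
__host__ __device__ inline float mcPolyWeightSketch(float dis, float R)
{
	// Zero outside the support radius, smooth and peaked at dis == 0.
	if (dis >= R)
		return 0.0f;
	float w = R*R - dis*dis;
	return w*w*w;
}
// Usage mirrors the loops above, e.g.:  res += pos[p] * mcPolyWeightSketch(length(pos[p] - gpos), R);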
__global__ void markSolid_terrain(charray mark, charray mark_terrain) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { if (mark_terrain[idx] == TYPEBOUNDARY) mark[idx] = TYPEBOUNDARY; } } //得到网格上每一个结点的密度值,为MC算法做准备 __global__ void genSphereDensfield(farray outdens, float3 center, float radius) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { //float3 center = make_float3(0.5f); float phi; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -0.1; else { float3 p = make_float3(i, j, k)*dparam.cellsize.x / (NXMC / NX); phi = radius - length(p - center); } outdens[idx] = phi; } } //-----MC 算法,from cuda sdk 4.2 // classify voxel based on number of vertices it will generate // one thread per voxel (cell) __global__ void classifyVoxel(uint* voxelVerts, uint *voxelOccupied, farray volume, float isoValue) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<NXMC*NYMC*NZMC) { int i, j, k; getijk(i, j, k, idx, NXMC, NYMC, NZMC); float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate flag indicating if each vertex is inside or outside isosurface uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // read number of vertices from texture uint numVerts = tex1Dfetch(numVertsTex, cubeindex); voxelVerts[idx] = numVerts; voxelOccupied[idx] = (numVerts > 0); }//endif } // compact voxel array __global__ void compactVoxels(uint *compactedVoxelArray, uint *voxelOccupied, uint *voxelOccupiedScan, uint numVoxels) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint i = __mul24(blockId, blockDim.x) + threadIdx.x; if (voxelOccupied[i] && (i < numVoxels)) { compactedVoxelArray[voxelOccupiedScan[i]] = i; } } // compute interpolated vertex along an edge __device__ float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1) { float t = (isolevel - f0) / (f1 - f0); return lerp(p0, p1, t); } // calculate triangle normal __device__ float3 calcNormal(float3 *v0, float3 *v1, float3 *v2) { float3 edge0 = *v1 - *v0; float3 edge1 = *v2 - *v0; // note - it's faster to perform normalization in vertex shader rather than here return cross(edge0, edge1); } __device__ int GetVertexID(int i, int j, int k) { return 3 * (i*(NZMC + 1)*(NYMC + 1) + j*(NZMC + 1) + k); } __device__ int GetEdgeID(int nX, int nY, int nZ, int edge) { // return GetVertexID( nX,nY,nZ ); switch (edge) { case 0: return GetVertexID(nX, nY, nZ) + 1; case 1: return GetVertexID(nX + 1, nY, nZ); case 2: return GetVertexID(nX, nY + 1, nZ) + 1; case 3: return GetVertexID(nX, nY, nZ); case 4: return GetVertexID(nX, nY, nZ + 1) + 1; case 5: return GetVertexID(nX + 1, nY, nZ + 1); case 6: return GetVertexID(nX, nY + 1, nZ + 1) + 1; case 7: return GetVertexID(nX, nY, nZ + 1); case 8: return GetVertexID(nX, nY, nZ) + 2; case 9: return GetVertexID(nX + 1, nY, nZ) + 2; case 10: return GetVertexID(nX + 1, 
nY + 1, nZ) + 2; case 11: return GetVertexID(nX, nY + 1, nZ) + 2; default: // Invalid edge no. return -1; } } // version that calculates flat surface normal for each triangle __global__ void generateTriangles2(float3 *pos, float3 *norm, uint *compactedVoxelArray, uint *numVertsScanned, farray volume, float isoValue, uint activeVoxels, uint maxVerts) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; float3 voxelSize = dparam.cellsize / (NXMC / NX); // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float3 p; p.x = i*voxelSize.x; p.y = j*voxelSize.y; p.z = k*voxelSize.z; float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate cell vertex positions float3 v[8]; v[0] = p; v[1] = p + make_float3(voxelSize.x, 0, 0); v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0); v[3] = p + make_float3(0, voxelSize.y, 0); v[4] = p + make_float3(0, 0, voxelSize.z); v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z); v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z); v[7] = p + make_float3(0, voxelSize.y, voxelSize.z); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // find the vertices where the surface intersects the cube // use shared memory to avoid using local __shared__ float3 vertlist[12 * NTHREADS]; vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]); vertlist[NTHREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]); vertlist[(NTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]); vertlist[(NTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]); vertlist[(NTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]); vertlist[(NTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]); vertlist[(NTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]); vertlist[(NTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]); vertlist[(NTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]); vertlist[(NTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]); vertlist[(NTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]); vertlist[(NTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]); __syncthreads(); // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); for (int idx2 = 0; idx2<numVerts; idx2 += 3) { uint index = numVertsScanned[voxel] + idx2; float3 *v[3]; uint edge; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2); v[0] = &vertlist[(edge*NTHREADS) + threadIdx.x]; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 1); v[1] = &vertlist[(edge*NTHREADS) + 
threadIdx.x]; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 2); v[2] = &vertlist[(edge*NTHREADS) + threadIdx.x]; // calculate triangle surface normal float3 n = calcNormal(v[0], v[1], v[2]); /*if (index < (maxVerts - 3)) */{ pos[index] = *v[0]; norm[index] = n; pos[index + 1] = *v[1]; norm[index + 1] = n; pos[index + 2] = *v[2]; norm[index + 2] = n; } } } // version that calculates flat surface normal for each triangle __global__ void generateTriangles_indices(float3 *pTriVertex, uint *pTriIndices, uint *compactedVoxelArray, farray volume, float isoValue, uint activeVoxels, uint maxVerts, uint *MCEdgeIdxMapped, uint *numVertsScanned) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; float3 voxelSize = dparam.cellsize / (NXMC / NX); // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float3 p; p.x = i*voxelSize.x; p.y = j*voxelSize.y; p.z = k*voxelSize.z; float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate cell vertex positions float3 v[8]; v[0] = p; v[1] = p + make_float3(voxelSize.x, 0, 0); v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0); v[3] = p + make_float3(0, voxelSize.y, 0); v[4] = p + make_float3(0, 0, voxelSize.z); v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z); v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z); v[7] = p + make_float3(0, voxelSize.y, voxelSize.z); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // find the vertices where the surface intersects the cube // use shared memory to avoid using local __shared__ float3 vertlist[12 * NTHREADS]; vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]); vertlist[NTHREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]); vertlist[(NTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]); vertlist[(NTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]); vertlist[(NTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]); vertlist[(NTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]); vertlist[(NTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]); vertlist[(NTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]); vertlist[(NTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]); vertlist[(NTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]); vertlist[(NTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]); vertlist[(NTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]); __syncthreads(); // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); uint edge, 
mappededgeidx; for (int idx2 = 0; idx2<numVerts; idx2 += 3) { uint index = numVertsScanned[voxel] + idx2; //vertex index to write back, sort by each triangle. //写入triangle包含的三个顶点的索引,索引是未经过处理的,即边的全局编号,之后单独处理 edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 1); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index + 1] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 2); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index + 2] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); } } __global__ void markActiveEdge_MC(uint *outmark, uint *compactedVoxelArray, farray volume, float isoValue, uint activeVoxels) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); uint edge; for (int idxVert = 0; idxVert<numVerts; idxVert++) { //下面可能会重复写,但是应该没问题。注意这个函数执行前需要把outmark置0 edge = tex1Dfetch(triTex, (cubeindex * 16) + idxVert); outmark[GetEdgeID(i, j, k, edge)] = 1; } //debug // for( int edge=0; edge<12; edge++ ) // outmark[GetEdgeID(i,j,k,edge)] = 1; } //以三角形为核心来计算法线,原子写入到点的法线中。注意:法线不要归一化 __global__ void calnormal_k(float3 *ppos, float3 *pnor, int pnum, uint *indices, int indicesnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < indicesnum / 3) //face number { int i1 = indices[idx * 3 + 0]; int i2 = indices[idx * 3 + 1]; int i3 = indices[idx * 3 + 2]; float3 p1 = ppos[i1]; float3 p2 = ppos[i2]; float3 p3 = ppos[i3]; //compute float3 nor = cross(p2 - p1, p3 - p1); //write back atomicAdd(&pnor[i1].x, nor.x); atomicAdd(&pnor[i2].x, nor.x); atomicAdd(&pnor[i3].x, nor.x); atomicAdd(&pnor[i1].y, nor.y); atomicAdd(&pnor[i2].y, nor.y); atomicAdd(&pnor[i3].y, nor.y); atomicAdd(&pnor[i1].z, nor.z); atomicAdd(&pnor[i2].z, nor.z); atomicAdd(&pnor[i3].z, nor.z); } } //归一化顶点法线 __global__ void normalizeTriangleNor_k(float3 *pnor, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < pnum) //vertex number { if (length(pnor[idx])>0) pnor[idx] = normalize(pnor[idx]); } } void allocateTextures(uint **d_edgeTable, uint **d_triTable, uint **d_numVertsTable) { checkCudaErrors(cudaMalloc((void**)d_edgeTable, 256 * 
sizeof(uint))); checkCudaErrors(cudaMemcpy((void *)*d_edgeTable, (void *)edgeTable, 256 * sizeof(uint), cudaMemcpyHostToDevice)); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindUnsigned); checkCudaErrors(cudaBindTexture(0, edgeTex, *d_edgeTable, channelDesc)); checkCudaErrors(cudaMalloc((void**)d_triTable, 256 * 16 * sizeof(uint))); checkCudaErrors(cudaMemcpy((void *)*d_triTable, (void *)triTable, 256 * 16 * sizeof(uint), cudaMemcpyHostToDevice)); checkCudaErrors(cudaBindTexture(0, triTex, *d_triTable, channelDesc)); checkCudaErrors(cudaMalloc((void**)d_numVertsTable, 256 * sizeof(uint))); checkCudaErrors(cudaMemcpy((void *)*d_numVertsTable, (void *)numVertsTable, 256 * sizeof(uint), cudaMemcpyHostToDevice)); checkCudaErrors(cudaBindTexture(0, numVertsTex, *d_numVertsTable, channelDesc)); } //计算两个1*n向量的点积,输出到out里(注意用归约求和的思想,out是一个数组,需要在CPU上累加起来) __global__ void arrayproduct_k(float* out, float* x, float *y, int n) { extern __shared__ float sdata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; sdata[tid] = (i >= n) ? 0 : (x[i] * y[i]); __syncthreads(); for (int s = blockDim.x / 2; s>0; s >>= 1) { if (tid<s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = sdata[0]; } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. __global__ void computeAx(farray ans, charray mark, farray x, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (mark[idx] == TYPEFLUID) //todo: should add typesolid or not. { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = -6.0f*center; float h2_rev = dparam.cellsize.x*dparam.cellsize.x; //notice: x必须在AIR类型的格子里是0,下面的式子才正确 sum += (mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k); sum += (mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k); sum += (mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1); sum += (mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k); sum += (mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k); sum += (mark(i, j, k - 1) == TYPEBOUNDARY) ? 
center : x(i, j, k - 1); ans[idx] = sum / h2_rev; } else ans[idx] = 0.0f; } } //Ans = x + a*y __global__ void pcg_op(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (A[idx] == TYPEFLUID) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } __global__ void buildprecondition_pcg(farray P, charray mark, farray ans, farray input, int n) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<n) { ans[idx] = 1.0f / 6 * input[idx]; } } __global__ void copyParticle2GL_vel_k(float3* ppos, float3 *pvel, float *pmass, char *pflag, int pnum, float *renderpos, float *rendercolor) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; if (pflag[idx] == TYPEFLUID) { rendercolor[idx * 3] = 1.0f; rendercolor[idx * 3 + 1] = 0.0f; rendercolor[idx * 3 + 2] = 0.0f; } else if (pflag[idx] == TYPEAIR) { rendercolor[idx * 3] = 0.0f; rendercolor[idx * 3 + 1] = 0.0f; rendercolor[idx * 3 + 2] = 1.0f; } else if (pflag[idx] == TYPESOLID) { rendercolor[idx * 3] = 0.0f; rendercolor[idx * 3 + 1] = 1.0f; rendercolor[idx * 3 + 2] = 0.0f; } } } __global__ void copyParticle2GL_radius_k(float3* ppos, float *pmass, char *pflag, int pnum, float *renderpos, float *rendercolor, float minmass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; minmass *= 1.2f; //trick float rate = (pmass[idx] - minmass*dparam.m0) / (dparam.m0 - minmass*dparam.m0); rate = fmax(0.0f, fmin(1.0f, rate)); { float3 color = mapColorBlue2Red(powf(rate, 1.0f / 3)*6.0f); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; } } } __device__ inline void atomicaddfloat3(float3 *a, int idx, float3 b) { atomicAdd(&a[idx].x, b.x); atomicAdd(&a[idx].y, b.y); atomicAdd(&a[idx].z, b.z); } __global__ void smooth_computedisplacement(float3 *displacement, int *weight, float3 *ppos, uint *indices, int trianglenum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<trianglenum) { uint p1 = indices[idx * 3]; uint p2 = indices[idx * 3 + 1]; uint p3 = indices[idx * 3 + 2]; atomicaddfloat3(displacement, p1, ppos[p2] - ppos[p1]); atomicaddfloat3(displacement, p1, ppos[p3] - ppos[p1]); atomicaddfloat3(displacement, p2, ppos[p1] - ppos[p2]); atomicaddfloat3(displacement, p2, ppos[p3] - ppos[p2]); atomicaddfloat3(displacement, p3, ppos[p1] - ppos[p3]); atomicaddfloat3(displacement, p3, ppos[p2] - ppos[p3]); atomicAdd(&weight[p1], 2); atomicAdd(&weight[p2], 2); atomicAdd(&weight[p3], 2); } } __global__ void smooth_addDisplacement(float3 *displacement, int *weight, float3 *ppos, int vertexnum, float param) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<vertexnum) { if (weight[idx]>0) ppos[idx] += param * displacement[idx] / weight[idx]; displacement[idx] = make_float3(0.0f); weight[idx] = 0; } } //diffuse density field. __global__ void diffuse_dense(farray outp, farray inp, charray mark, float alpha, float beta) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < outp.xn * outp.yn * outp.zn) { float resp = 0; float p1, p2, p3, p4, p5, p6; float p0 = inp[idx]; int i, j, k; getijk(i, j, k, idx, outp.xn, outp.yn, outp.zn); if (mark(i, j, k) == TYPEBOUNDARY) outp[idx] = 0.0f; else { p1 = (mark(i + 1, j, k) == TYPEBOUNDARY) ? 
p0 : inp(i + 1, j, k); p2 = (mark(i, j + 1, k) == TYPEBOUNDARY) ? p0 : inp(i, j + 1, k); p3 = (mark(i, j, k + 1) == TYPEBOUNDARY) ? p0 : inp(i, j, k + 1); p4 = (mark(i - 1, j, k) == TYPEBOUNDARY) ? p0 : inp(i - 1, j, k); p5 = (mark(i, j - 1, k) == TYPEBOUNDARY) ? p0 : inp(i, j - 1, k); p6 = (mark(i, j, k - 1) == TYPEBOUNDARY) ? p0 : inp(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 + alpha*p0) / beta; outp[idx] = resp; } } } //diffuse velocity field. __global__ void diffuse_velocity(farray outv, farray inv, float alpha, float beta) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < outv.xn * outv.yn * outv.zn) { float resp = 0; float p1, p2, p3, p4, p5, p6; float p0 = inv[idx]; int i, j, k; getijk(i, j, k, idx, outv.xn, outv.yn, outv.zn); if (i == 0 || j == 0 || k == 0 || i >= outv.xn - 1 || j >= outv.yn - 1 || k >= outv.zn - 1) outv[idx] = p0; else { p1 = inv(i + 1, j, k); p2 = inv(i, j + 1, k); p3 = inv(i, j, k + 1); p4 = inv(i - 1, j, k); p5 = inv(i, j - 1, k); p6 = inv(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 + alpha*p0) / beta; outv[idx] = resp; } } } //maxLength, hashPoints是输出:最长边(每个block里),每个三角形一个用来hash的点 __global__ void createAABB_q(float3* points, int nPoints, uint3* faces, int nFaces, float *maxLength, float3* hashPoints) { int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= nFaces) return; __shared__ float maxArray[256]; uint p1 = faces[index].x; uint p2 = faces[index].y; uint p3 = faces[index].z; //得到三角形的三个顶点 float3 px = points[p1]; float3 py = points[p2]; float3 pz = points[p3]; AABB aabb; aabb.xMin = (px.x>py.x) ? py.x : px.x; aabb.xMin = (aabb.xMin>pz.x) ? pz.x : aabb.xMin; aabb.xMax = (px.x<py.x) ? py.x : px.x; aabb.xMax = (aabb.xMax<pz.x) ? pz.x : aabb.xMax; aabb.yMin = (px.y>py.y) ? py.y : px.y; aabb.yMin = (aabb.yMin>pz.y) ? pz.y : aabb.yMin; aabb.yMax = (px.y<py.y) ? py.y : px.y; aabb.yMax = (aabb.yMax<pz.y) ? pz.y : aabb.yMax; aabb.zMin = (px.z>py.z) ? py.z : px.z; aabb.zMin = (aabb.zMin>pz.z) ? pz.z : aabb.zMin; aabb.zMax = (px.z<py.z) ? py.z : px.z; aabb.zMax = (aabb.zMax<pz.z) ? pz.z : aabb.zMax; float tempMaxLength = aabb.xMax - aabb.xMin; tempMaxLength = (tempMaxLength>aabb.yMax - aabb.yMin) ? (tempMaxLength) : (aabb.yMax - aabb.yMin); tempMaxLength = (tempMaxLength>aabb.zMax - aabb.zMin) ? 
(tempMaxLength) : (aabb.zMax - aabb.zMin); maxArray[threadIdx.x] = tempMaxLength; hashPoints[index] = make_float3((aabb.xMin + aabb.xMax) / 2, (aabb.yMin + aabb.yMax) / 2, (aabb.zMin + aabb.zMax) / 2); __syncthreads(); for (int i = blockDim.x / 2; i>0; i /= 2) { if (threadIdx.x < i) maxArray[threadIdx.x] = max(maxArray[threadIdx.x], maxArray[i + threadIdx.x]); __syncthreads(); } if (threadIdx.x == 0) maxLength[blockIdx.x] = maxArray[0]; } __global__ void calcHash_radix_q( uint2* gridParticleIndex, // output float3* posArray, // input: positions uint numParticles, float3 t_min, float3 t_max) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 pos = posArray[index]; uint hash; int gz = (pos.z - t_min.z) / dparam.triHashSize.z; int gy = (pos.y - t_min.y) / dparam.triHashSize.y; int gx = (pos.x - t_min.x) / dparam.triHashSize.x; if (gx < 0 || gx > dparam.triHashRes.x - 1 || gy < 0 || gy > dparam.triHashRes.y - 1 || gz < 0 || gz > dparam.triHashRes.z - 1) hash = CELL_UNDEF; else hash = __mul24(__mul24(gz, (int)dparam.triHashRes.y) + gy, (int)dparam.triHashRes.x) + gx; // store grid hash and particle index gridParticleIndex[index] = make_uint2(hash, index); } // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStart_radix_q(uint* cellStart, // output: cell start index uint* cellEnd, // output: cell end index uint3* sortedFaces, uint2 * gridParticleHash, // input: sorted grid hashes uint3* oldFaces, uint numParticles) { extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index].x; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index - 1].x; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleHash[index].y; sortedFaces[index] = oldFaces[sortedIndex]; // see particles_kernel.cuh } } __global__ void calculateNormal(float3* points, uint3* faces, float3* normals, int num) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index < num) { uint3 face = faces[index]; float3 v1 = points[face.x]; float3 v2 = points[face.y]; float3 v3 = points[face.z]; float3 tmp; tmp.x = (v1.y - v2.y)*(v1.z - v3.z) - (v1.z - v2.z)*(v1.y - v3.y); tmp.y = (v1.z - v2.z)*(v1.x - v3.x) - (v1.x - v2.x)*(v1.z - v3.z); tmp.z = (v1.x - v2.x)*(v1.y - v3.y) - (v1.y - v2.y)*(v1.x - v3.x); normals[index] = normalize(tmp); } } //temp_yanglp: 检测一个小球与三角形是否相交,求出对粒子作用的顶点权重,返回值为负数,表示没有相交,正数表示相交 __device__ float IntersectTriangle_q(float3& pos, float radius, float3& v0, float3& v1, float3& v2, float3 n) { //compute the distance of pos and triangle plane float d = dot(pos - v0, n); if (abs(d)>radius) return -1; float dislimit = radius*radius - d*d; //球心在三角形平面的投影 float3 pTri = pos - d*n; float3 tempcross; float d0 = dot(pTri - v0, pTri - v0); float d1 = dot(pTri - v1, pTri - v1); float d2 = dot(pTri - v2, pTri - v2); //判断是否在三角形内 int tt = (dot(cross(pTri - v0, v1 - v0), n)>0) ? 1 : 0; tt += (dot(cross(pTri - v1, v2 - v1), n)>0) ? 2 : 0; tt += (dot(cross(pTri - v2, v0 - v2), n)>0) ? 4 : 0; //cuPrintf("tt=%d\n",tt); if (tt == 7 || tt == 0) { return abs(d); } //判断投影点与三角形顶点的距离是否符合条件 float distemp; float dis = (d0<dislimit) ? (d0) : dislimit; //dis表示到目前为止投影点到三角形的最小距离 dis = (d1<dis) ? (d1) : dis; dis = (d2<dis) ? (d2) : dis; //判断投影点与三角形边的距离 if (dot(v1 - v0, pTri - v0)*dot(v0 - v1, pTri - v1)>0) { tempcross = cross(v1 - v0, pTri - v0); distemp = dot(tempcross, tempcross) / dot(v1 - v0, v1 - v0); dis = (distemp<dis) ? (distemp) : dis; } if (dot(v2 - v1, pTri - v1)*dot(v1 - v2, pTri - v2)>0) { tempcross = cross(v2 - v1, pTri - v1); distemp = dot(tempcross, tempcross) / dot(v2 - v1, v2 - v1); dis = (distemp<dis) ? (distemp) : dis; } if (dot(v0 - v2, pTri - v2)*dot(v2 - v0, pTri - v0)>0) { tempcross = cross(v0 - v2, pTri - v2); distemp = dot(tempcross, tempcross) / dot(v0 - v2, v0 - v2); dis = (distemp<dis) ? 
(distemp) : dis; } if (dis > dislimit - 0.001) return -1; return sqrt(dis + d*d); } // calculate address in grid from position (clamping to edges) __device__ uint calcGridHash_q(int3 gridPos) { return __umul24(__umul24(gridPos.z, dparam.triHashRes.y), dparam.triHashRes.x) + __umul24(gridPos.y, dparam.triHashRes.x) + gridPos.x; } // collide a particle against all other particles in a given cell __device__ float3 collideCell(int3 gridPos, float3 pos, float radius, float3* surPoints, uint3* surIndex, float3* surfaceNor, uint* cellStart, uint* cellEnd, int scene) { uint gridHash = calcGridHash_q(gridPos); float dis_n, wib = 0; float3 force = make_float3(0.0f); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != CELL_UNDEF) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j<endIndex; j++) { //cuPrintf("j=%d\n", j); dis_n = IntersectTriangle_q(pos, radius, surPoints[surIndex[j].x], surPoints[surIndex[j].y], surPoints[surIndex[j].z], surfaceNor[j]); wib = 1 - dis_n / radius; if (dis_n >= 0 && wib > 0.00001) { force += (radius - dis_n) * (surfaceNor[j]) * 10; } } } return force; } __device__ void mindis_cell(float& mindisair, float& mindisfluid, float3 gpos, float3 *pos, char *parflag, float *pmass, uint *gridstart, uint *gridend, int gidx, float radius) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis; for (uint p = start; p<end; ++p) { dis = length(pos[p] - gpos);//减掉半径,后面的数是较正一下 // dis = fabs(length(pos[p] - gpos))- radius;// 依据mantaflow if (parflag[p] == TYPEAIR || parflag[p] == TYPEAIRSOLO)//todo: 是不是加上SOLO的类型以防止ls随着标记变化的突变? mindisair = (dis<mindisair) ? dis : mindisair; else if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) mindisfluid = (dis<mindisfluid) ? 
dis : mindisfluid; } } //这个level set的值很可能有问题,从画出来的图可以看出来一些,直接影响后面所有的内容。 //[2012]【长文】MultiFLIP for Energetic Two-Phase Fluid Simulation __global__ void genlevelset(farray lsfluid, farray lsair, charray mark, float3 *pos, char *parflag, float *pmass, uint *gridstart, uint *gridend, float fMCDensity, float offset) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) //每个格子一个值 { //float ls; float h = dparam.cellsize.x; mark[idx] = TYPEVACUUM; float r = 0.5f*h; //0.36f*h; //float r = 0.5*sqrt(3.)*1.01*2.5; //修改为0.5*1.01 依据mantaflow //get position int i, j, k; getijk(i, j, k, idx, NX, NY, NZ); float3 gpos = (make_float3(i, j, k) + make_float3(0.5f, 0.5f, 0.5f))*dparam.cellsize.x; // shifted by half cell float mindisair = 2.5f*h, mindisfluid = 2.5f*h; //2.5 cellsize //float mindisair = r, mindisfluid = r; //修正 mindis- 为 r 依据mantaflow int level = 2; for (int di = -level; di <= level; ++di) for (int dj = -level; dj <= level; ++dj) for (int dk = -level; dk <= level; ++dk) //周围27个格子就行 { if (verifycellidx(i + di, j + dj, k + dk)) { mindis_cell(mindisair, mindisfluid, gpos, pos, parflag, pmass, gridstart, gridend, getidx(i + di, j + dj, k + dk), r); } } mindisair -= r; mindisfluid -= r; lsfluid[idx] = mindisfluid; // lsair[idx] = mindisair - offset*h; //todo: 这里略微向外扩张了一下气体的ls,避免气体粒子correctpos时向内收缩导到气泡体积的减小。注意:这个修正会导致markgrid的不对,因此流体mark会大一层,其流动会受很大影响 lsair[idx] = mindisair; } } __device__ void sumcell_fluidSolid(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_fluidSolid(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -1; di <= 0; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 0; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? 
(usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 0; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? (usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } __device__ void sumcell_air(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_air(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; int rangemax = 2, rangemin = 1; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemin; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemin; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemin; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? (usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } __device__ void sumcell_solid(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPESOLID) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. 
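			// Mass-weighted P2G splat (what the statements below accumulate): weight += m_p * K(d^2)
			// and usum += m_p * K(d^2) * v_p; the caller (mapvelp2g_k_solid below) then sets the face
			// velocity to usum / weight. dis2 is in cell units because pos[p]*scale and gpos are both
			// expressed in grid coordinates.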
w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_solid(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; int rangemax = 2, rangemin = 1; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemin; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemin; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemin; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? 
(usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } //计算散度 __global__ void cptdivergence_bubble(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls, farray sf) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float jx0, jx1, jy0, jy1, jz0, jz1, J; //surface tension, [2005]Discontinuous Fluids float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k), jx1 = 0; else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k), jx1 = 0; else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * waterux(i + 1, j, k) + (1 - theta) * airux(i + 1, j, k); jx1 = theta * sf(i, j, k) + (1 - theta) * sf(i + 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * airux(i + 1, j, k) + (1 - theta) * waterux(i + 1, j, k); jx1 = theta * sf(i, j, k) + (1 - theta) * sf(i + 1, j, k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k), jx0 = 0; else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k), jx0 = 0; else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * waterux(i, j, k) + (1 - theta) * airux(i, j, k); jx0 = theta*sf(i, j, k) + (1 - theta)*sf(i - 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * airux(i, j, k) + (1 - theta) * waterux(i, j, k); jx0 = theta*sf(i, j, k) + (1 - theta)*sf(i - 1, j, k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k), jy1 = 0; else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k), jy1 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * wateruy(i, j + 1, k) + (1 - theta) * airuy(i, j + 1, k); jy1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j + 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * airuy(i, j + 1, k) + (1 - theta) * wateruy(i, j + 1, k); jy1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j + 1, k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k), jy0 = 0; else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k), jy0 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * wateruy(i, j, k) + (1 - theta) * airuy(i, j, k); jy0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j - 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * airuy(i, j, k) + (1 - theta) * wateruy(i, j, k); jy0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j - 1, k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1), jz1 = 0; else if (mark[idx] == 
TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1), jz1 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * wateruz(i, j, k + 1) + (1 - theta) * airuz(i, j, k + 1); jz1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k + 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * airuz(i, j, k + 1) + (1 - theta) * wateruz(i, j, k + 1); jz1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k + 1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k), jz0 = 0; else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k), jz0 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * wateruz(i, j, k) + (1 - theta) * airuz(i, j, k); jz0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k - 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * airuz(i, j, k) + (1 - theta) * wateruz(i, j, k); jz0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k - 1); } J = (jx1 - jx0 + jy1 - jy0 + jz1 - jz0) / h / h; div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; div += J; //surfacetension } outdiv[idx] = div; } } //计算散度,不使用压强来施加表面张力 __global__ void cptdivergence_bubble2(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k); else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k); else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * waterux(i + 1, j, k) + (1 - theta) * airux(i + 1, j, k); //ux1 = airux(i+1,j,k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * airux(i + 1, j, k) + (1 - theta) * waterux(i + 1, j, k); //ux1 = airux(i+1,j,k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k); else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * waterux(i, j, k) + (1 - theta) * airux(i, j, k); //ux0 = airux(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * airux(i, j, k) + (1 - theta) * waterux(i, j, k); //ux0 = airux(i,j,k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k); else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k); else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * wateruy(i, j + 1, k) + (1 - theta) * airuy(i, j + 1, k); //uy1 = airuy(i,j+1,k); } 
else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * airuy(i, j + 1, k) + (1 - theta) * wateruy(i, j + 1, k); //uy1 = airuy(i,j+1,k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * wateruy(i, j, k) + (1 - theta) * airuy(i, j, k); // uy0 = airuy(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * airuy(i, j, k) + (1 - theta) * wateruy(i, j, k); //uy0 = airuy(i,j,k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1); else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1); else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * wateruz(i, j, k + 1) + (1 - theta) * airuz(i, j, k + 1); //uz1 = airuz(i,j,k+1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * airuz(i, j, k + 1) + (1 - theta) * wateruz(i, j, k + 1); //uz1 = airuz(i,j,k+1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * wateruz(i, j, k) + (1 - theta) * airuz(i, j, k); //uz0 = airuz(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * airuz(i, j, k) + (1 - theta) * wateruz(i, j, k); //uz0 = airuz(i,j,k); } div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; } outdiv[idx] = div; } } __global__ void cptdivergence_bubble3(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k); else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k); else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i+1,j,k)-ls(i,j,k)); // ux1 = theta * waterux(i+1,j,k) + (1-theta) * airux(i+1,j,k); ux1 = airux(i + 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i+1,j,k)-ls(i,j,k)); // ux1 = theta * airux(i+1,j,k) + (1-theta) * waterux(i+1,j,k); ux1 = airux(i + 1, j, k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k); else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i-1,j,k)-ls(i,j,k)); // ux0 = theta * waterux(i,j,k) + (1-theta) * 
airux(i,j,k); ux0 = airux(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i-1,j,k)-ls(i,j,k)); // ux0 = theta * airux(i,j,k) + (1-theta) * waterux(i,j,k); ux0 = airux(i, j, k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k); else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k); else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j+1,k)-ls(i,j,k)); // uy1 = theta * wateruy(i,j+1,k) + (1-theta) * airuy(i,j+1,k); uy1 = airuy(i, j + 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j+1,k)-ls(i,j,k)); // uy1 = theta * airuy(i,j+1,k) + (1-theta) * wateruy(i,j+1,k); uy1 = airuy(i, j + 1, k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j-1,k)-ls(i,j,k)); // uy0 = theta * wateruy(i,j,k) + (1-theta) * airuy(i,j,k); uy0 = airuy(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j-1,k)-ls(i,j,k)); // uy0 = theta * airuy(i,j,k) + (1-theta) * wateruy(i,j,k); uy0 = airuy(i, j, k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1); else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1); else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j,k+1)-ls(i,j,k)); // uz1 = theta * wateruz(i,j,k+1) + (1-theta) * airuz(i,j,k+1); uz1 = airuz(i, j, k + 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j,k+1)-ls(i,j,k)); // uz1 = theta * airuz(i,j,k+1) + (1-theta) * wateruz(i,j,k+1); uz1 = airuz(i, j, k + 1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { // theta=(0.0f-ls(i,j,k))/(ls(i,j,k-1)-ls(i,j,k)); // uz0 = theta * wateruz(i,j,k) + (1-theta) * airuz(i,j,k); uz0 = airuz(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { // theta=(0.0f-ls(i,j,k))/(ls(i,j,k-1)-ls(i,j,k)); // uz0 = theta * airuz(i,j,k) + (1-theta) * wateruz(i,j,k); uz0 = airuz(i, j, k); } div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; } outdiv[idx] = div; } } //压强与速度的计算 __global__ void subGradPress_bubble(farray p, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; if (idx<dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k)) / h; } if (idx<dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k)) / h; } if (idx<dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1)) / h; } } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. 
__global__ void computeAx_bubble(farray ans, charray mark, farray x, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = -6.0f*center; float h2_rev = dparam.cellsize.x*dparam.cellsize.x; sum += (mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k); sum += (mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k); sum += (mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1); sum += (mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k); sum += (mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k); sum += (mark(i, j, k - 1) == TYPEBOUNDARY) ? center : x(i, j, k - 1); ans[idx] = sum / h2_rev; } else ans[idx] = 0.0f; } } //Ans = x + a*y __global__ void pcg_op_bubble(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (A[idx] == TYPEFLUID || A[idx] == TYPEAIR) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } //注意:这个函数只更新流体粒子(TYPEFLUID)的位置,但更新AIR粒子的速度(不是AIRSOLO)(用CIP模式). __global__ void advectparticle_RK2_bubble(float3 *ppos, float3 *pvel, int pnum, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEAIRSOLO) //对于小的气体粒子AIRSOLO,什么也不更新,跳过 return; //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.cellsize.x)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.cellsize.x)); char partype = parflag[idx]; //pos-->grid xyz float3 gvel = make_float3(0.0f); if (partype == TYPEFLUID) gvel = getParticleVelFromGrid(ipos, waterux, wateruy, wateruz); else if (partype == TYPEAIR) gvel = getParticleVelFromGrid(ipos, airux, airuy, airuz); else //TYPEAIRSOLO 有自己的仿真方法,不参与这些仿真 return; if (velmode == CIP /*|| partype==TYPEAIR*/) //todo: 气体粒子用cip模式,减少乱跑的可能 ivel = gvel; else ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //mid point: x(n+1/2) = x(n) + 0.5*dt*u(xn) float3 midpoint = ipos + gvel * dt * 0.5; float3 gvelmidpoint; if (partype == TYPEFLUID) gvelmidpoint = getParticleVelFromGrid(midpoint, waterux, wateruy, wateruz); else gvelmidpoint = getParticleVelFromGrid(midpoint, airux, airuy, airuz); // x(n+1) = x(n) + dt*u(x+1/2) ipos += gvelmidpoint * dt; //check boundary if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back: TYPEAIR+TYPESOLID只更新速度,TYPESOLO之前已经return,TYPEFLUID更新位置和速度。 pvel[idx] = ivel; // if( partype==TYPEFLUID ) // ppos[idx] = ipos; } } __global__ void mapvelg2p_flip_bubble(float3 *ppos, float3 *vel, char* parflag, int pnum, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float3 gvel = make_float3(0.0f); if (parflag[idx] == TYPEFLUID || parflag[idx] == TYPESOLID) gvel = getParticleVelFromGrid(ipos, waterux, wateruy, wateruz); else if (parflag[idx] == TYPEAIR) gvel = getParticleVelFromGrid(ipos, airux, airuy, airuz); 
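		// FLIP-style grid-to-particle transfer: the sampled gvel is added to, not assigned to, the
		// particle velocity, so the u/v/w fields passed to this kernel are presumably the per-step
		// velocity change computed on the grid rather than the full grid velocity. This is an
		// inference from the '+=' below and the kernel name; the calling code is not shown here.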
vel[idx] += gvel; } } __global__ void compsurfacetension_k(farray sf, charray mark, farray phigrax, farray phigray, farray phigraz, float sigma) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] != TYPEBOUNDARY) { int i, j, k; getijk(i, j, k, idx); float len, h = dparam.cellsize.x; float res, grax1, gray1, graz1, grax0, gray0, graz0; float3 phigracenter = make_float3(phigrax[idx], phigray[idx], phigraz[idx]); len = length(phigracenter); if (len == 0) res = 0; else { phigracenter /= len; if (verifycellidx(i + 1, j, k)) { len = length(make_float3(phigrax(i + 1, j, k), phigray(i + 1, j, k), phigraz(i + 1, j, k))); if (len == 0) grax1 = phigracenter.x; else grax1 = phigrax(i + 1, j, k) / len; } else grax1 = phigracenter.x; if (verifycellidx(i - 1, j, k)) { len = length(make_float3(phigrax(i - 1, j, k), phigray(i - 1, j, k), phigraz(i - 1, j, k))); if (len == 0) grax0 = phigracenter.x; else grax0 = phigrax(i - 1, j, k) / len; } else grax0 = phigracenter.x; if (verifycellidx(i, j + 1, k)) { len = length(make_float3(phigrax(i, j + 1, k), phigray(i, j + 1, k), phigraz(i, j + 1, k))); if (len == 0) gray1 = phigracenter.y; else gray1 = phigray(i, j + 1, k) / len; } else gray1 = phigracenter.y; if (verifycellidx(i, j - 1, k)) { len = length(make_float3(phigrax(i, j - 1, k), phigray(i, j - 1, k), phigraz(i, j - 1, k))); if (len == 0) gray0 = phigracenter.y; else gray0 = phigray(i, j - 1, k) / len; } else gray0 = phigracenter.y; if (verifycellidx(i, j, k + 1)) { len = length(make_float3(phigrax(i, j, k + 1), phigray(i, j, k + 1), phigraz(i, j, k + 1))); if (len == 0) graz1 = phigracenter.z; else graz1 = phigraz(i, j, k + 1) / len; } else graz1 = phigracenter.z; if (verifycellidx(i, j, k - 1)) { len = length(make_float3(phigrax(i, j, k - 1), phigray(i, j, k - 1), phigraz(i, j, k - 1))); if (len == 0) graz0 = phigracenter.z; else graz0 = phigraz(i, j, k - 1) / len; } else graz0 = phigracenter.z; res = (grax1 - grax0 + gray1 - gray0 + graz1 - graz0) / h * 0.5f; //res = (grax1-phigracenter.x + gray1-phigracenter.y + graz1-phigracenter.z) / h ; } sf[idx] = res*sigma; } else sf[idx] = 0; } } __global__ void enforcesurfacetension_p(float3* ppos, float3 *pvel, char *pflag, int pnum, farray lsmerge, farray sf, farray phigrax, farray phigray, farray phigraz, charray mark, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID/* || pflag[idx]==TYPEAIRSOLO*/ || pflag[idx] == TYPEFLUID) return; if( (scene != SCENE_MELTANDBOIL&&scene != SCENE_MELTANDBOIL_HIGHRES && pflag[idx] == TYPEAIRSOLO) || ((scene != SCENE_ALL && pflag[idx] == TYPEAIRSOLO))) return; //1. compute the cell, and get the ls, get sf. float3 ipos = ppos[idx]; float ilsmerge = getScaleFromFrid(ipos, lsmerge); float isf = getScaleFromFrid(ipos, sf); float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); float lendir = length(dir); if (lendir == 0) return; float3 f; dir /= lendir; ilsmerge /= lendir; //周围最少一个格子是空气的 int i, j, k; getijkfrompos(i, j, k, ipos); int cnt = (mark(i, j, k) == TYPEAIR) ? 1 : 0; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) if (verifycellidx(i + di, j + dj, k + dk)) if (mark(i + di, j + dj, k + dk) == TYPEAIR) cnt++; if (cnt == 0) return; // if(abs(ls_p)<threshold), enforce a surface tension force, change the velocity. 
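		// The kick applied below uses the per-cell value computed in compsurfacetension_k above:
		// sf = sigma * kappa, with kappa = div( grad(phi) / |grad(phi)| ) estimated by central
		// differences of the normalized level-set gradient. A particle within one cell of the
		// interface (|phi / |grad phi|| < dx) and adjacent to at least one air cell receives
		//     dv = -sigma * kappa * n * dt
		// along the interface normal n, i.e. a particle-level continuum-surface-force kick.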
if (abs(ilsmerge)<dparam.cellsize.x) { f = -isf*dir; pvel[idx] += f*dparam.dt; } } } //标记levelset里比较大的正数,他们是邻近域内没有粒子的 __global__ void markLS_bigpositive(farray ls, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = ls[idx] / dparam.cellsize.x; if (ls[idx] >1.99f) { ls[idx] = 5.0f; mark[idx] = TYPEAIR; //标记为需要sweep的单元,并非真正的标记 } else mark[idx] = TYPEFLUID; } } __global__ void setLSback_bigpositive(farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = ls[idx] * dparam.cellsize.x; } } __global__ void preparels(farray ls, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = -ls[idx] / dparam.cellsize.x; if (ls[idx] >0) { ls[idx] = 5.0f; mark[idx] = TYPEAIR; //标记为需要sweep的单元,并非真正的标记 } else mark[idx] = TYPEFLUID; } } __global__ void setLSback(farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = -ls[idx] * dparam.cellsize.x; } } __global__ void mergeLSAndMarkGrid(farray lsmerge, charray mark, farray lsfluid, farray lsair) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx< dparam.gnum) { float h = dparam.cellsize.x; if (lsair[idx] >4.99f * h) { lsmerge[idx] = lsfluid[idx]; if (lsfluid[idx]>0) mark[idx] = TYPEVACUUM; else mark[idx] = TYPEFLUID; } else if (lsfluid[idx]>4.99f*h) { lsmerge[idx] = lsair[idx]; if (lsair[idx]>0) mark[idx] = TYPEVACUUM; else mark[idx] = TYPEAIR; } else if (lsair[idx]>0.8f*h && lsfluid[idx]>0.8f*h) { mark[idx] = TYPEVACUUM; lsmerge[idx] = min(lsfluid[idx], lsair[idx]); } else { lsmerge[idx] = (lsfluid[idx] - lsair[idx])*0.5f; if (lsmerge[idx]>0) mark[idx] = TYPEAIR; else mark[idx] = TYPEFLUID; } //todo: 对于气体将出到水面的时候,ls还是会有问题 int i, j, k; getijk(i, j, k, idx); if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 || k == NZ - 1) mark[idx] = TYPEBOUNDARY, lsmerge[idx] = -0.5f*h; //todo: debug: //lsmerge[idx] = -lsmerge[idx]; } } __global__ void sweepu_k_bubble(farray outux, farray outuy, farray outuz, farray ux, farray uy, farray uz, farray ls, charray mark, char sweepflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; float wx, wy, wz, wsum; //三个方向上的权重 if (idx < dparam.gvnum.x) { //copy outux[idx] = ux[idx]; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>1 && i<NX - 1 /*&& j>0 && j<N-1 && k>0 && k<N-1*/) { if ((mark(i, j, k) != sweepflag && mark(i - 1, j, k) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (j + dj<0 || j + dj>NY - 1 || k + dk<0 || k + dk >NZ -1) continue; wx = -di*(ls(i, j, k) - ls(i - 1, j, k)); if (wx<0) continue; wy = (ls(i, j, k) + ls(i - 1, j, k) - ls(i, j + dj, k) - ls(i - 1, j + dj, k))*0.5f; if (wy<0) continue; wz = (ls(i, j, k) + ls(i - 1, j, k) - ls(i, j, k + dk) - ls(i - 1, j, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outux(i, j, k) = wx*ux(i + di, j, k) + wy* ux(i, j + dj, k) + wz* ux(i, j, k + dk); } } } if (idx < dparam.gvnum.y) { //copy outuy[idx] = uy[idx]; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if ( /*i>0 && i<N-1 &&*/ j>1 && j<NY - 1 /*&& k>0 && k<N-1*/) { if ((mark(i, j, k) != sweepflag && mark(i, j - 1, k) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di>NX - 1 || k + dk<0 || k + dk 
>NZ - 1) continue; wy = -dj*(ls(i, j, k) - ls(i, j - 1, k)); if (wy<0) continue; wx = (ls(i, j, k) + ls(i, j - 1, k) - ls(i + di, j, k) - ls(i + di, j - 1, k))*0.5f; if (wx<0) continue; wz = (ls(i, j, k) + ls(i, j - 1, k) - ls(i, j, k + dk) - ls(i, j - 1, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuy(i, j, k) = wx*uy(i + di, j, k) + wy* uy(i, j + dj, k) + wz* uy(i, j, k + dk); } } } if (idx < dparam.gvnum.z) { //copy outuz[idx] = uz[idx]; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if ( /*i>0 && i<N-1 && j>0 && j<N-1 &&*/ k>1 && k<NZ - 1) { if ((mark(i, j, k) != sweepflag && mark(i, j, k - 1) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di >NX - 1 || j + dj<0 || j + dj>NY - 1) continue; wz = -dk*(ls(i, j, k) - ls(i, j, k - 1)); if (wz<0) continue; wy = (ls(i, j, k) + ls(i, j, k - 1) - ls(i, j + dj, k) - ls(i, j + dj, k - 1))*0.5f; if (wy<0) continue; wx = (ls(i, j, k) + ls(i, j, k - 1) - ls(i + di, j, k) - ls(i + di, j, k - 1))*0.5f; if (wx<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuz(i, j, k) = wx*uz(i + di, j, k) + wy* uz(i, j + dj, k) + wz* uz(i, j, k + dk); } } } } //修正粒子的位置,当气体粒子跑到流体中时,"拉"它回来,反之亦然 __global__ void correctbubblepos(farray ls, farray phigrax, farray phigray, farray phigraz, float3 *ppos, char* pflag, int pnum, float *pphi) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; //test. todo. debug if (iflag == TYPEAIRSOLO || iflag == TYPESOLID) return; float3 ipos = ppos[idx]; int s = (iflag == TYPEFLUID) ? -1 : 1; float d, dirlen, rs = 0.5f*dparam.cellsize.x; float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, ls) / dirlen; //test // if( s*d<0 ) // ipos=ipos +rs*dir; //debug. pphi[idx] = d; //todo: 这里有问题 if (s*d<0 && abs(d)<0.5f*dparam.cellsize.x) //wrong way { if (iflag == TYPEAIR&& abs(d)>0.3f*dparam.cellsize.x) //气体粒子只在错位比较明显的情况下才纠正,主要是为了防止气泡体积的收缩。 ipos = ipos - d*dir; else if (iflag == TYPEFLUID) { ipos = ipos - d*dir; dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, ls) / dirlen; ipos = ipos + s*(rs - s*d)*dir; } // cnt++; } else if (iflag == TYPEFLUID && s*d<rs*0.5f && s*d >= 0) //todo: rs*0.5f有点小问题,但不加这个0.5的话流体的体积会变化明显 { ipos = ipos + s*(rs - s*d)*dir; } ppos[idx] = ipos; } } //修正粒子的位置,当气体粒子跑到流体中时,"拉"它回来,反之亦然. //这里修正液体粒子位置时用的是气体的ls __global__ void correctbubblepos_air(farray lsmerge, farray phigrax, farray phigray, farray phigraz, farray lsair, farray phigrax_air, farray phigray_air, farray phigraz_air, float3 *ppos, char* pflag, int pnum, float *pphi) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; //test. todo. debug if (iflag == TYPEAIRSOLO || iflag == TYPESOLID) return; float3 ipos = ppos[idx]; int s = (iflag == TYPEFLUID) ? -1 : 1; float d, dirlen, rs = 0.5f*dparam.cellsize.x; float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, lsmerge) / dirlen; //test // if( s*d<0 ) // ipos=ipos +rs*dir; //debug. 
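		// Same correction idea as correctbubblepos above: s is the sign the particle's level-set
		// value should have (-1 for liquid, +1 for gas) and d = phi(x_p)/|grad phi| approximates the
		// signed distance to the merged interface. If s*d < 0 the particle is on the wrong side and
		// is pulled back along the normal by -d*dir; air particles are only corrected for clear
		// violations (|d| > 0.3*dx) so that bubbles do not shrink artificially.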
pphi[idx] = d; //todo: 这里有问题 if (s*d<0 && abs(d)<0.5f*dparam.cellsize.x) //wrong way { if (iflag == TYPEAIR&& abs(d)>0.3f*dparam.cellsize.x) //气体粒子只在错位比较明显的情况下才纠正,主要是为了防止气泡体积的收缩。 ipos = ipos - d*dir; // cnt++; } if (iflag == TYPEFLUID) //对液体粒子使用气体的level set来处理,慢慢把液体“挤出”气泡之外,使得lsmerge计算更为准确 { dir = getVectorFromGrid(ipos, phigrax_air, phigray_air, phigraz_air); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, lsair) / dirlen; if (d<-1.3f*rs) ipos = ipos - (d - rs)*dir; } ppos[idx] = ipos; } } //根据levelset计算梯度场,相当于一个方向 __global__ void computePhigra(farray phigrax, farray phigray, farray phigraz, farray ls) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float h = dparam.cellsize.x; float lsx1, lsx0, lsy1, lsy0, lsz1, lsz0, lscenter = ls[idx]; lsx1 = (verifycellidx(i + 1, j, k)) ? ls(i + 1, j, k) : lscenter; lsx0 = (verifycellidx(i - 1, j, k)) ? ls(i - 1, j, k) : lscenter; lsy1 = (verifycellidx(i, j + 1, k)) ? ls(i, j + 1, k) : lscenter; lsy0 = (verifycellidx(i, j - 1, k)) ? ls(i, j - 1, k) : lscenter; lsz1 = (verifycellidx(i, j, k + 1)) ? ls(i, j, k + 1) : lscenter; lsz0 = (verifycellidx(i, j, k - 1)) ? ls(i, j, k - 1) : lscenter; //todo: 这里需要考虑一下 phigrax[idx] = ((lsx1 - lsx0)*0.5f) / h; phigray[idx] = ((lsy1 - lsy0)*0.5f) / h; phigraz[idx] = ((lsz1 - lsz0)*0.5f) / h; //phigrax[idx] = (lsx1-lscenter)/h; //phigray[idx] = (lsy1-lscenter)/h; //phigraz[idx] = (lsz1-lscenter)/h; } } __global__ void copyParticle2GL_phi(float3* ppos, char *pflag, float *pmass, float *pTemperature, int pnum, float *renderpos, float *rendercolor, farray ls, farray phigrax, farray phigray, farray phigraz, char typeflag, float Tmax, float Tmin) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //todo: if (pflag[idx] == typeflag/* || ppos[idx].y<NY*0.5f*dparam.cellsize.x */) { renderpos[idx * 3] = -2.0f; renderpos[idx * 3 + 1] = 0.0f; renderpos[idx * 3 + 2] = 0.0f; float3 color = make_float3(0.0f); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; return; } renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; float3 color; if (pflag[idx] == TYPEAIR) color = mapColorBlue2Red(0.0f); else if (pflag[idx] == TYPEFLUID) color = mapColorBlue2Red(2.0f); else if (pflag[idx] == TYPESOLID) color = mapColorBlue2Red(4.0f); else color = mapColorBlue2Red(6.0f); //color=mapColorBlue2Red( (pTemperature[idx]-Tmin)/(Tmax-Tmin)*6.0f ); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; } } //压强与速度的计算,加入surface tension. 
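// Pressure/velocity update with a surface-tension pressure jump, in the spirit of the ghost-fluid
// treatment cited below: at a face whose two cells lie on opposite sides of the liquid/air
// interface, the pressure is discontinuous by roughly J = sigma*kappa. subGradPress_bubble locates
// the interface on the face with theta = (0 - phi_left) / (phi_right - phi_left), interpolates
// J = theta*sf_left + (1 - theta)*sf_right from the per-cell surface-tension values, and removes
// the jump from the face gradient: u_face -= (p_right - p_left - J) / h.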
[2005]Discontinuous Fluids __global__ void subGradPress_bubble(farray p, farray ux, farray uy, farray uz, farray sf, farray lsmerge, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; float J = 0.0f, theta; if (idx<dparam.gvnum.x) { J = 0.0f; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR)) { theta = (0.0f - lsmerge(i - 1, j, k)) / (lsmerge(i, j, k) - lsmerge(i - 1, j, k)); J = theta*sf(i - 1, j, k) + (1.0f - theta)*sf(i, j, k); } ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k) - J) / h; } } if (idx<dparam.gvnum.y) { J = 0.0f; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR)) { theta = (0.0f - lsmerge(i, j - 1, k)) / (lsmerge(i, j, k) - lsmerge(i, j - 1, k)); J = theta*sf(i, j - 1, k) + (1.0f - theta)*sf(i, j, k); } uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k) - J) / h; } } if (idx<dparam.gvnum.z) { J = 0.0f; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR)) { theta = (0.0f - lsmerge(i, j, k - 1)) / (lsmerge(i, j, k) - lsmerge(i, j, k - 1)); J = theta*sf(i, j, k - 1) + (1.0f - theta)*sf(i, j, k); } uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1) - J) / h; } } } __global__ void sweepVacuum(charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (mark[idx] != TYPEAIR) return; //mark for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) if (mark(i + di, j + dj, k + dk) == TYPEVACUUM) mark[idx] = TYPEVACUUM; } } __global__ void markDeleteAirParticle(float3* ppos, char* pflag, float *pmass, uint *preservemark, int pnum, charray mark, farray lsmerge, farray lsair, uint *cnt) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { //fluid and solid particles are preserved, air and airsolo particles are verified. 
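		// Deletion is mark-and-compact: this kernel writes preservemark[i] = 1 for particles to keep
		// and 0 for particles to drop (liquid particles deep inside the air region, and gas particles
		// that are surrounded by vacuum cells or whose mass has shrunk to almost nothing). The host
		// presumably runs an exclusive prefix scan over preservemark to obtain each survivor's output
		// slot (preserveflagscan), and the deleteparticles kernel below copies survivors into the
		// compacted arrays.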
if (pflag[idx] == TYPESOLID) { preservemark[idx] = 1; return; } int i, j, k; getijkfrompos(i, j, k, ppos[idx]); if (pflag[idx] == TYPEFLUID) { float lsm = getScaleFromFrid(ppos[idx], lsmerge); float lsa = getScaleFromFrid(ppos[idx], lsair); if ( /*lsm>1.2f*dparam.cellsize.x || */lsa<-1.0*dparam.cellsize.x) preservemark[idx] = 0, cnt[0]++; else preservemark[idx] = 1; return; } int cnt = 0; for (int di = -1; di <= 1; di += 1) for (int dj = -1; dj <= 1; dj += 1) for (int dk = -1; dk <= 1; dk += 1) if (verifycellidx(i + di, j + dj, k + dk) && mark(i + di, j + dj, k + dk) == TYPEVACUUM) cnt++; if (cnt == 0 && pmass[idx]>0.000001f) //notice: 这里附带的删除了质量过小的气体粒子,与气体粒子的被吸收有关 preservemark[idx] = 1; else preservemark[idx] = 0; } } // compact voxel array __global__ void deleteparticles(uint *preserveflag, uint *preserveflagscan, int pnum, float3 *outpos, float3 *pos, float3 *outvel, float3 *vel, float *outmass, float* mass, char *outflag, char *flag, float *outTemperature, float *temperature, float *outheat, float *heat, float *outsolubility, float *solubility, float *outgascontain, float *gascontain) { uint idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (preserveflag[idx] == 1) { //deleteflagscan 存的是删除某些粒子之后的"索引". uint outidx = preserveflagscan[idx]; outpos[outidx] = pos[idx]; outvel[outidx] = vel[idx]; outmass[outidx] = mass[idx]; outflag[outidx] = flag[idx]; outTemperature[outidx] = temperature[idx]; outheat[outidx] = heat[idx]; outsolubility[outidx] = solubility[idx]; outgascontain[outidx] = gascontain[idx]; } } } __device__ int cntairparticle(float3 *ppos, char *pflag, int igrid, uint *gridstart, uint *gridend, const float3 &ipos, float r) { uint start = gridstart[igrid]; int res = 0; float dis; if (start == CELL_UNDEF) return res; for (int p = start; p<gridend[igrid]; p++) { dis = length(ppos[p] - ipos); if (dis<r && (pflag[p] == TYPEAIR || pflag[p] == TYPEAIRSOLO)) { ++res; } } return res; } __device__ inline bool isInBoundaryCell(int x, int y, int z) { int level = 2; if (x <= level || x >= NX - 1 - level || y <= level || y >= NY - 1 - level) return true; else return false; } __global__ void verifySoloAirParticle(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray lsmerge, farray airux, farray airuy, farray airuz, uint *gridstart, uint *gridend, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; if (iflag == TYPEFLUID || iflag == TYPESOLID) //TYPEAIR, TYPEAIRSOLO can go on. return; float3 ipos = ppos[idx]; float ls = getScaleFromFrid(ipos, lsmerge); float h = dparam.cellsize.x; int i, j, k; getijkfrompos(i, j, k, ipos); //a key adjustment, the tolerent will affect the result directly. int cnt = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) cnt += cntairparticle(ppos, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, h); float tol1 = -1.45f, tol2 = -0.5f; if (scene == SCENE_MELTANDBOIL || scene == SCENE_MELTANDBOIL_HIGHRES || scene==SCENE_ALL) tol1 = 0.05f, tol2 = -0.8f; else if (scene == SCENE_INTERACTION) tol1 = 0.2f, tol2 = -0.5f; if ((cnt >= 10 || ls>tol1*h) && pflag[idx] == TYPEAIRSOLO && !isInBoundaryCell(i, j, k)) //decide whether the air solo particle should be transfered to air particle. { if (cnt >= 3) pflag[idx] = TYPEAIR; } else if (iflag == TYPEAIR && (isInBoundaryCell(i, j, k) || ls<tol2*h || cnt <= 1)) { //todo: 插值速度 or not??? 
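			// (todo above: interpolate the velocity or not?) A dispersed AIR particle that ends up
			// isolated -- few gas neighbours within one cell, deep inside the liquid according to the
			// merged level set, or sitting in a boundary cell -- is demoted back to TYPEAIRSOLO and its
			// velocity is re-seeded from the air grid. The branch above is the inverse: an AIRSOLO
			// particle is promoted to TYPEAIR once enough gas particles cluster around it.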
//pvel[idx]= pvel[idx]*0.8f + 0.2f*getParticleVelFromGrid(ipos,airux,airuy,airuz); pvel[idx] = getParticleVelFromGrid(ipos, airux, airuy, airuz); pflag[idx] = TYPEAIRSOLO; } } } __device__ float sumdensity(float3 ipos, float h2, int grididx, float3 *ppos, char *pflag, uint *gridstart, uint *gridend) { float res = 0; uint start = gridstart[grididx]; if (start == CELL_UNDEF) return res; float dist2; for (uint p = start; p<gridend[grididx]; p++) { // notice: should include liquid particle, not just spray particle. if (pflag[p] != TYPEAIR && pflag[p] != TYPEAIRSOLO) continue; dist2 = dot(ppos[p] - ipos, ppos[p] - ipos); if (dist2<h2) res += pow(h2 - dist2, 3.0f); //todo: m0 or pmass[p]? } return res; } __global__ void calcDensPress_Air(float3* ppos, float *pdens, float *ppress, char* pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIR && pflag[idx] != TYPEAIRSOLO) return; float3 ipos = ppos[idx]; float h = dparam.cellsize.x; //todo: set support radius, key part. float h2 = h*h; int i, j, k; getijkfrompos(i, j, k, ipos); float dens = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) dens += sumdensity(ipos, h2, getidx(i + di, j + dj, k + dk), ppos, pflag, gridstart, gridend); dens *= dparam.airm0 * dparam.poly6kern; if (dens == 0) dens = 1.0f; pdens[idx] = 1.0f / dens; ppress[idx] = 1.5f * (dens - dparam.waterrho*0.5f); } } __device__ float3 sumforce(float3 *ppos, float3 *pvel, float *ppress, float *pdens, char *pflag, int grididx, uint *gridstart, uint *gridend, float3 ipos, float3 ivel, float ipress, float idens, float h, float kvis) { uint start = gridstart[grididx]; float3 res = make_float3(0.0f), dir; float dis, c, pterm, dterm;// kattrct=0.0f, if (start == CELL_UNDEF) return res; float vterm = dparam.lapkern * kvis; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<h && (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR)) { c = h - dis; pterm = -0.5f * c * dparam.spikykern * (ipress + ppress[p]) / dis; dterm = c * idens * pdens[p]; res += (pterm * dir + vterm * (pvel[p] - ivel)) * dterm; } } return res; } __global__ void enforceForceSoloAirP(float3 *ppos, float3 *pvel, float *pdens, float *ppress, char *pflag, int pnum, uint *gridstart, uint *gridend, float viscositySPH, float maxVelForBubble) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIRSOLO && pflag[idx] != TYPEAIR) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float ipress = ppress[idx], idens = pdens[idx]; float h = dparam.cellsize.x; //float kvis=0.0f; int i, j, k; float3 force = make_float3(0.0f); getijkfrompos(i, j, k, ipos); int width = 1; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) force += sumforce(ppos, pvel, ppress, pdens, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, ivel, ipress, idens, h, viscositySPH); //todo: 直接更新速度和位置?? force *= dparam.airm0; //force = make_float3(0); ivel += force*dparam.dt; ipos += ivel*dparam.dt; //restrict the vel below a threshold. // if( length(ivel) > maxVelForBubble ) // ivel = normalize(ivel) * maxVelForBubble; // // advect particle, using rho!!!! 
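		// The solo-bubble pass above is standard weakly compressible SPH restricted to gas particles:
		// calcDensPress_Air accumulates density with the poly6 kernel and converts it to pressure via
		// the stiff state equation p_i = 1.5*(rho_i - 0.5*rho_water), and sumforce applies the
		// symmetrized pressure + viscosity force, roughly
		//     f_i ~ m0 * sum_j [ -0.5*(p_i + p_j)*gradW_spiky(r_ij) + mu*lapW(r_ij)*(v_j - v_i) ] / (rho_i*rho_j)
		// (pdens stores 1/rho, so dterm supplies the 1/(rho_i*rho_j) factor). Velocity is integrated
		// explicitly; the position write-back is commented out, so positions are presumably advanced
		// elsewhere in the pipeline.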
// ppos[idx]=ipos; pvel[idx] = ivel; } } __device__ float sumdensity_SLCouple(float3 ipos, float h2, int grididx, float3 *ppos, char *pflag, uint *gridstart, uint *gridend) { float res = 0; uint start = gridstart[grididx]; if (start == CELL_UNDEF) return res; float dist2; for (uint p = start; p<gridend[grididx]; p++) { dist2 = dot(ppos[p] - ipos, ppos[p] - ipos); if (dist2<h2) res += pow(h2 - dist2, 3.0f); } return res; } //solid-liquid coupling, in SPH framework __global__ void calcDensPressSPH_SLCouple(float3* ppos, float *pdens, float *ppress, char* pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { float3 ipos = ppos[idx]; float h = dparam.cellsize.x; //todo: set support radius, key part. float h2 = h*h; int i, j, k; getijkfrompos(i, j, k, ipos); float dens = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) dens += sumdensity_SLCouple(ipos, h2, getidx(i + di, j + dj, k + dk), ppos, pflag, gridstart, gridend); dens *= dparam.m0 * dparam.poly6kern; if (dens == 0) dens = 1.0f; pdens[idx] = 1.0f / dens; ppress[idx] = 1.5f * (dens - dparam.waterrho); } } __device__ float3 sumforce_SLCouple(float3 *ppos, float3 *pvel, float *ppress, float *pdens, char *pflag, int grididx, uint *gridstart, uint *gridend, float3 ipos, float3 ivel, float ipress, float idens, float h, float kvis) { uint start = gridstart[grididx]; float3 res = make_float3(0.0f), dir; float dis, c, pterm, dterm;// kattrct=0.0f, kvis=0.0f; if (start == CELL_UNDEF) return res; float vterm = dparam.lapkern * kvis; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<h) { c = h - dis; pterm = -0.5f * c * dparam.spikykern * (ipress + ppress[p]) / dis; dterm = c * idens * pdens[p]; res += (pterm * dir + vterm * (pvel[p] - ivel)) * dterm; } } return res; } __global__ void enforceForceSPH_SLCouple(float3 *ppos, float3 *pvel, float *pdens, float *ppress, char *pflag, int pnum, uint *gridstart, uint *gridend, float viscositySPH) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEFLUID) //只有fluid计算,solid不在这里更新 return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float ipress = ppress[idx], idens = pdens[idx]; float h = dparam.cellsize.x; //float kvis=0.0f; int i, j, k; float3 force = make_float3(0.0f); getijkfrompos(i, j, k, ipos); int width = 1; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) force += sumforce_SLCouple(ppos, pvel, ppress, pdens, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, ivel, ipress, idens, h, viscositySPH); // force=make_float3(0.0f); //todo: 直接更新速度和位置?? //add gravity here? or external force part; force *= dparam.m0; //force = make_float3(0); ivel += force*dparam.dt; ipos += ivel*dparam.dt; // advect particle, using rho!!!! 
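		// Unlike the solo-bubble pass, the solid-liquid coupling pass integrates both velocity and
		// position of TYPEFLUID particles right here; solid particles are excluded above and are
		// moved later by the rigid-body update (computeVelSolid_k / computePosSolid_k).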
ppos[idx] = ipos; pvel[idx] = ivel; } } __global__ void updateFixedHeat(farray fixedHeat, int frame) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i >= NX / 4 && i<NX*0.75 && j >= NY / 4 && j<NY*0.75 && k <= 3 /*k<=20 && k>=19*/) fixedHeat[idx] = 273.0f + 100.0f * min(frame / 40.f, 1.0f); else fixedHeat[idx] = UNDEF_TEMPERATURE; } } __global__ void addHeatAtBottom(farray Tp, int frame, float heatIncreaseBottom) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i >= 1 && i<NX - 1 && j >= 1 && j<NY - 1 && k <= 3 /*k<=20 && k>=19*/) Tp[idx] += heatIncreaseBottom;//1.5f; //Tp[idx] = 350.0f;//273.0f + 100.0f * min(frame/40.f, 1.0f ); Tp[idx] = min(378.0f, Tp[idx]); } } // __global__ void compb_heat(farray Tp_old, farray Tp, farray fixedheat, charray mark, float *heatAlphaArray) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float alpha = heatAlphaArray[mark[idx]]; //如果有固定的温度,那么tp与b都要根据这个fixedheat来计算 // if( fixedheat[idx]!=UNDEF_TEMPERATURE ) // Tp[idx]=fixedheat[idx], Tp_old[idx] = fixedheat[idx]*dparam.cellsize.x*dparam.cellsize.x/alpha/dparam.dt; // else Tp_old[idx] = Tp[idx] * dparam.cellsize.x*dparam.cellsize.x / alpha / dparam.dt; } } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. __global__ void computeAx_heat(farray ans, charray mark, farray x, int n, float *heatAlphaArray, farray fixedHeat, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { float h = dparam.cellsize.x; float dt = dparam.dt; float alpha = heatAlphaArray[mark[idx]]; if (mark[idx] != TYPEBOUNDARY/* && mark[idx]!=TYPEVACUUM*/) { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = (h*h / alpha / dt + 6.0f)*center; //trick: 决定要不要让freeair参与计算 if (scene == SCENE_BOILING || scene == SCENE_BOILING_HIGHRES || scene == SCENE_MELTANDBOIL || scene == SCENE_MELTANDBOIL_HIGHRES || scene ==SCENE_ALL) { sum -= ((mark(i + 1, j, k) == TYPEBOUNDARY || mark(i + 1, j, k) == TYPEVACUUM) ? center : x(i + 1, j, k)); sum -= ((mark(i, j + 1, k) == TYPEBOUNDARY || mark(i, j + 1, k) == TYPEVACUUM) ? center : x(i, j + 1, k)); sum -= ((mark(i, j, k + 1) == TYPEBOUNDARY || mark(i, j, k + 1) == TYPEVACUUM) ? center : x(i, j, k + 1)); sum -= ((mark(i - 1, j, k) == TYPEBOUNDARY || mark(i - 1, j, k) == TYPEVACUUM) ? center : x(i - 1, j, k)); sum -= ((mark(i, j - 1, k) == TYPEBOUNDARY || mark(i, j - 1, k) == TYPEVACUUM) ? center : x(i, j - 1, k)); sum -= ((mark(i, j, k - 1) == TYPEBOUNDARY || mark(i, j, k - 1) == TYPEVACUUM) ? center : x(i, j, k - 1)); } else { sum -= ((mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k)); sum -= ((mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k)); sum -= ((mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1)); sum -= ((mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k)); sum -= ((mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k)); sum -= ((mark(i, j, k - 1) == TYPEBOUNDARY) ? 
center : x(i, j, k - 1)); } ans[idx] = sum; } } } //Ans = x + a*y __global__ void pcg_op_heat(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { // if( A[idx]==TYPEFLUID || A[idx]==TYPEAIR ) if (A[idx] != TYPEBOUNDARY) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } __global__ void setBoundaryHeat(farray tp) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == NX - 1) tp[idx] = tp(i - 1, j, k); else if (i == 0) tp[idx] = tp(i + 1, j, k); else if (j == NY - 1) tp[idx] = tp(i, j - 1, k); else if (j == 0) tp[idx] = tp(i, j + 1, k); else if (k == NZ - 1) tp[idx] = tp(i, j, k - 1); else if (k == 0) tp[idx] = tp(i, j, k + 1); } } __global__ void compTpChange(farray tp, farray tpsave, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] != TYPEBOUNDARY) tpsave[idx] = tp[idx] - tpsave[idx]; else tpsave[idx] = 0; } } __device__ void sumHeat(float &heatsum, float &weight, float3 gpos, float3 *pos, float *pTemperature, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = sharp_kernel(dis2, RE); weight += w; heatsum += w*pTemperature[p]; } } __global__ void mapHeatp2g_hash(float3 *ppos, float *pTemperature, int pnum, farray heat, uint* gridstart, uint *gridend, float defaulttemperature) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; float weight = 0.0f, heatsum = 0; float3 gpos; getijk(i, j, k, idx); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumHeat(heatsum, weight, gpos, ppos, pTemperature, gridstart, gridend, getidx(i + di, j + dj, k + dk)); heatsum = (weight>0) ? (heatsum / weight) : defaulttemperature; heat(i, j, k) = heatsum; } } __global__ void mapHeatg2p(float3 *ppos, char *parflag, float *pTemperature, int pnum, farray Tchange, farray T, float defaultSolidT, float alphaTempTrans) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; pTemperature[idx] = alphaTempTrans*(pTemperature[idx] + getScaleFromFrid(ipos, Tchange)) + (1 - alphaTempTrans)*getScaleFromFrid(ipos, T); //use a scheme like FLIP, update the particle temperature by heat change. } } __global__ void mapHeatg2p_MeltAndBoil(float3 *ppos, char *parflag, float *pTemperature, int pnum, farray Tchange, farray T, float defaultSolidT, float alphaTempTrans) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float newtemp = alphaTempTrans*(pTemperature[idx] + getScaleFromFrid(ipos, Tchange)) + (1 - alphaTempTrans)*getScaleFromFrid(ipos, T); //use a scheme like FLIP, update the particle temperature by heat change. 
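		// FLIP/PIC-style blend for temperature: with alpha = alphaTempTrans,
		//     T_new = alpha * (T_p + deltaT_grid(x_p)) + (1 - alpha) * T_grid(x_p),
		// i.e. mostly carry the particle's own temperature plus the grid-side change, with a small
		// amount of direct grid interpolation for smoothing. The next line additionally damps the
		// update for solid particles (95% old / 5% new) so solids heat up and melt more gradually.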
if (parflag[idx] == TYPESOLID) pTemperature[idx] = 0.95f*(pTemperature[idx]) + 0.05f*newtemp; else pTemperature[idx] = newtemp; } } __global__ void initHeatParticle(float *pTemperature, float *pHeat, float defaultSolidT, float defaultLiquidT, float LiquidHeatTh, char *pflag, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) { pTemperature[idx] = defaultSolidT; pHeat[idx] = 0; } else { pTemperature[idx] = defaultLiquidT; pHeat[idx] = LiquidHeatTh; } } } //Temperature0=273.15K, Solubility0=1.0f (每1个流体粒子里含的气体够生成一个完事的气体粒子) __global__ void initsolubility_k(float *psolubility, float* pgascontain, float *ptemperature, char *pflag, int pnum, float Solubility0, float Temperature0, float dissolvegasrate, float initgasrate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPEFLUID || pflag[idx] == TYPESOLID) { psolubility[idx] = dissolvegasrate*dparam.airm0 * exp(1018.9f*(1 / ptemperature[idx] - 1 / Temperature0)); //todo: adjust the parameter. pgascontain[idx] = initgasrate*psolubility[idx]; } else { psolubility[idx] = 0; pgascontain[idx] = 0; } } } //Temperature0=273.15K, Solubility0=1.0f (每1个流体粒子里含的气体够生成一个完事的气体粒子) __global__ void updatesolubility(float *psolubility, float *ptemperature, char *pflag, int pnum, float Solubility0, float Temperature0, float dissolvegasrate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPEFLUID) psolubility[idx] = dissolvegasrate*dparam.airm0 * exp(1018.9f*(1 / ptemperature[idx] - 1 / Temperature0)); //todo: adjust the parameter. } } //addparnums初始化应该是0 __global__ void GenerateGasParticle_k(float *psolubility, float *paircontain, float3 *ppos, float3 *pvel, float *pmass, char *pflag, float *pTemperature, float *pLHeat, int pnum, uint *gridstart, uint *gridend, int *addparnums, float *randfloat, int randcnts, int frame, farray gTemperature, float LiquidHeatTh, int *seedcell, int seednum, float vaporGenRate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { float gcontain = 0, gsolubility = 0, gairexist = 0; int liquidParCnt = 0, gasParCnt = 0; float airparticlemass0 = dparam.airm0; //todo float vaporsum = 0;//, vaporrate = 0.01f; float3 gaspos = make_float3(0), gasvel = make_float3(0); int i, j, k; getijk(i, j, k, idx); if (k <= 1 || isInBoundaryCell(i, j, k)) return; //最下面的一行不生成气泡粒子 float3 gpos = make_float3(i, j, k)*dparam.cellsize.x; uint start = gridstart[idx]; if (start == CELL_UNDEF) return; //1. 
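		// Step 1 (per cell): tally, over the particles hashed into this cell, the dissolved gas held
		// by liquid particles (paircontain), their total solubility, the vapor mass produced by latent
		// heat above LiquidHeatTh, and the mass/centroid/velocity of gas particles already present.
		// A new TYPEAIRSOLO particle of mass dparam.airm0 is then spawned (near the existing gas
		// centroid if there is one, otherwise at a random spot in the cell) when
		// dissolved + vapor - solubility exceeds one particle mass and the cell holds a nucleation
		// seed or already contains gas; the liquid particles' stored gas and latent heat are clamped back.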
统计气体含量、流体粒子含有的气体量、可溶解量 for (int p = start; p<gridend[idx]; p++) { if (pflag[p] == TYPEFLUID) { gcontain += paircontain[p]; gsolubility += psolubility[p]; vaporsum += max(0.0f, pLHeat[p] - LiquidHeatTh) * vaporGenRate * airparticlemass0; liquidParCnt++; } else if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { gairexist += pmass[p]; gaspos += ppos[p]; gasvel += pvel[p]; gasParCnt++; } } bool hasseed = false; for (int i = 0; i<seednum; i++) if (seedcell[i] == idx) hasseed = true; //如有必要,增加一个气体粒子 int addcnt = 0; int randbase = (idx*frame) % (randcnts - 200); //randpos and randfloat are in [0,1] float3 randpos = make_float3(randfloat[(randbase + addcnt++) % randcnts], randfloat[(randbase + addcnt++) % randcnts], randfloat[(randbase + addcnt++) % randcnts]); float randnum = randfloat[(randbase + addcnt++) % randcnts]; float r = dparam.cellsize.x * 0.25f; if (gcontain - gsolubility + vaporsum > airparticlemass0 && (hasseed || gasParCnt>0)) { int addindex = atomicAdd(&addparnums[0], 1) + pnum; pmass[addindex] = airparticlemass0;//dparam.m0; //todo: if (gasParCnt>0) { ppos[addindex] = gaspos / gasParCnt + (max(0.5f, randnum)*r) * (randpos - make_float3(0.5f)) * 2; //与凝结核有关 pvel[addindex] = make_float3(0.0f);//gasvel/gasParCnt; //与已有的气体粒子有关 } else { ppos[addindex] = gpos + dparam.cellsize.x*randpos; pvel[addindex] = make_float3(0.0f); } pflag[addindex] = TYPEAIRSOLO; pTemperature[addindex] = gTemperature[idx]; //网格温度 pLHeat[addindex] = 0; //气体粒子的heat无所谓 paircontain[addindex] = 0.0f; psolubility[addindex] = 0.0f; //重置液体粒子的气体含量 for (int p = start; p<gridend[idx]; p++) { if (pflag[p] == TYPEFLUID) { paircontain[p] = min(paircontain[p], psolubility[p]); pLHeat[p] = min(pLHeat[p], LiquidHeatTh); //todo: decrease the liquids mass. } } } } } //addparnums初始化应该是0 __global__ void updatebubblemass(float *psolubility, float *paircontain, float3 *ppos, float *pmass, char *pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum / 8) //每个线程负责8个格子 { float gcontain = 0, gsolubility = 0, gairexist = 0; int fpcnt = 0, apcnt = 0; float airparticlemass0 = dparam.airm0; //todo int i, j, k; getijk(i, j, k, idx, NX / 2, NY / 2, NZ / 2); i *= 2, j *= 2, k *= 2; // float3 gpos; int gidx; for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { gidx = getidx(i + di, j + dj, k + dk); // gpos=make_float3(i+di,j+dj,k+dk)*dparam.cellsize.x; if (gridstart[gidx] == CELL_UNDEF) continue; //1. 统计气体含量、流体粒子含有的气体量、可溶解量 for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (pflag[p] == TYPEFLUID) { gcontain += paircontain[p]; gsolubility += psolubility[p]; fpcnt++; } else if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { gairexist += pmass[p]; apcnt++; } } } //2. 
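		// Step 2 (each thread owns a 2x2x2 block of cells): if the dissolved gas exceeds the total
		// solubility by more than ~2.5*airm0, the surplus is pushed into existing bubble particles,
		// capped at +3*airm0 per particle and at the mass of a bubble of radius 1.5*dx, and the
		// liquid particles' dissolved amounts are reduced by the same total. Step 3 is the opposite
		// case: when the block is undersaturated by more than ~3.6*airm0, bubble mass is absorbed
		// back into the liquid's dissolved fraction; particles driven to (near) zero mass are removed
		// later by markDeleteAirParticle.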
如果需要释放流体粒子中溶解的气体形成或增大气泡 float maxradius = 1.5f*dparam.cellsize.x; float maxmass = getMassfromR(maxradius); float massaddlimit = 3.0f*dparam.airm0; //每个气体粒子最多增加3个单位质量 float addmass; if (gcontain>gsolubility) { //todo: 参数 if (abs(gcontain - gsolubility) < 2.5*airparticlemass0/*1.3f*gsolubility*/) //如果相差不大,不进行调整 return; //2.1: 增大已有气泡的体积到最大 float needadd = gcontain - gsolubility; if (apcnt>0) { for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (needadd <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; // gpos=make_float3(i+di,j+dj,k+dk)*dparam.cellsize.x; for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { addmass = min(massaddlimit, maxmass - pmass[p]); addmass = max(0.0f, min(needadd, addmass)); needadd -= addmass; //有一定的误差 pmass[p] += addmass; if (needadd <= 0) break; } } } } //2.3: 调整每个流体粒子里的气体含量 float actualadd = gcontain - gsolubility - needadd, eachchange; for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (actualadd <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (actualadd <= 0) break; if (pflag[p] == TYPEFLUID) { if (paircontain[p] - psolubility[p]>0) { eachchange = min(actualadd, paircontain[p] - psolubility[p]); paircontain[p] -= eachchange; actualadd -= eachchange; } } } } } //end if( gcontain>gsolubility ) else if (gairexist>0) //3: 如果需要吸收气体,且有气体粒子在本网格内 { //todo: 参数 if (abs(gcontain - gsolubility) < 3.6f*airparticlemass0/*1.3f*gsolubility*/) //如果相差不大,不进行调整 return; //3.1: 减少气体粒子的质量 float needminus = gsolubility - gcontain; //可以吸收的气体量 float masschangesum = 0; //实际吸收的气体量 if (gairexist<needminus) needminus = gairexist; if (needminus>0)//minus some of them to 0 mass, use another kernel to delete it. { for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (needminus <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx] && needminus>0; p++) { if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { float masschange = min(pmass[p], needminus); //本气体粒子会被吸收多少质量 pmass[p] -= masschange; needminus -= masschange; masschangesum += masschange; } } } } //3.2: 调整流体粒子中溶解的气体含量. change the fluid particls. for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (masschangesum <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx] && masschangesum>0; p++) { if (pflag[p] == TYPEFLUID) { float containchange = min(max(0.0f, psolubility[p] - paircontain[p]), masschangesum); //本流体粒子会被填充多少气体量 paircontain[p] += containchange; masschangesum -= containchange; } } } } } } //使用预计算好的位置根据温度和溶解度生成empty气泡,当气泡大于一定体积时,生成AIR粒子。 //对其它模块的影响:markgrid, correctpos, heattransfer. 
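// The seeded "empty" bubbles below model nucleation at the heated boundary: each seed grows by
// collecting excess dissolved gas (paircontain - parsolubility) from liquid particles in a small
// neighbourhood, slides outward along its stored direction as the radius grows, and once the radius
// reaches one cell it is replaced by a ball of TYPEAIR particles sampled at dparam.samplespace
// spacing, after which the seed is reset. A host-side launch would look roughly like the sketch
// below (the block size and how addparnums is read back are assumptions, not taken from this file):
//     int threads = 256;
//     updateEmptyBubbles<<<(penum + threads - 1) / threads, threads>>>(pepos, pedir, peradius, penum,
//         parpos, parvel, parmass, parTemperature, parflag, parsolubility, paraircontain, parnum,
//         addparnums, gridstart, gridend, gTemperature);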
__global__ void updateEmptyBubbles(float3 *pepos, float3 *pedir, float *peradius, int penum,
	float3 *parpos, float3 *parvel, float *parmass, float* parTemperature, char *parflag, float *parsolubility, float *paraircontain, int parnum,
	int *addparnums, uint *gridstart, uint *gridend, farray gTemperature)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<penum)
	{
		int airRscale = 2;
		float3 ipos = pepos[idx], idir = pedir[idx];
		float iradius = peradius[idx];
		float rthresholdleave = 1.0f*dparam.cellsize.x;	//todo: at this radius the bubble converts into real gas particles and leaves the wall; controls the bubble radius
		float rthreshold = max(0.0f, iradius + 0.1f*dparam.cellsize.x);	// maximum radius allowed this step, to avoid instability from sudden growth
		rthreshold = min(rthreshold, rthresholdleave);
		int i, j, k;
		getijkfrompos(i, j, k, ipos);

		// collect the excess dissolved gas from the surrounding cells and grow the bubble volume
		float massorigin = dparam.waterrho * 4 / 3 * M_PI*(pow(iradius, 3))*0.5;
		float masscantake = dparam.waterrho * 4 / 3 * M_PI*(pow(rthreshold, 3) - pow(iradius, 3))*0.5, massadd = 0;	//todo
		int range = 2;
		for (int di = -range; di <= range &&masscantake>0; di++) for (int dj = -range; dj <= range&&masscantake>0; dj++) for (int dk = -range; dk <= range&&masscantake>0; dk++)
			if (verifycellidx(i + di, j + dj, k + dk))
			{
				int grididx = getidx(i + di, j + dj, k + dk);	// fix: index the offset cell (the original used getidx(i, j, k) here, which ignored the neighbourhood loop)
				for (uint p = gridstart[grididx]; p<gridend[grididx] && masscantake>0; p++)	// walk the particles in this cell
				{
					if (parflag[p] != TYPEFLUID) continue;
					float gasreslease = max(0.0f, paraircontain[p] - parsolubility[p]);
					if (gasreslease <= 0) continue;
					gasreslease = min(gasreslease, masscantake);
					massadd += gasreslease;
					masscantake -= gasreslease;
					//paraircontain[p] -= gasreslease;
				}
			}

		float newiradius = pow((massadd + massorigin) / dparam.waterrho / 4 * 3 / M_PI, 1.0 / 3);
		ipos += (newiradius - iradius)*idir;

		float ss = dparam.samplespace;
		if (newiradius + 1e-5 >= rthresholdleave)	// big enough: emit real gas particles
		{
			int num = ceil(newiradius / ss);
			for (float x = -num*ss; x <= newiradius; x += ss)for (float y = -num*ss; y <= newiradius; y += ss)for (float z = -num*ss; z <= newiradius; z += ss)
			{
				if (x*x + y*y + z*z>newiradius*newiradius) continue;
				int addindex = atomicAdd(&addparnums[0], 1) + parnum;
				parmass[addindex] = dparam.airm0;	//todo:
				parpos[addindex] = ipos + make_float3(x, y, z);
				parflag[addindex] = TYPEAIR;
				parvel[addindex] = make_float3(0.0f);
				parTemperature[addindex] = gTemperature[getidx(i, j, 1)];	//todo: should use the temperature of the grid cell directly below the bubble
				paraircontain[addindex] = 0.0f;
				parsolubility[addindex] = 0.0f;
			}
			ipos.z = 1.1f*dparam.cellsize.x;	// reset the seed position
			newiradius = 0;
		}
		peradius[idx] = newiradius;
		pepos[idx] = ipos;
	}
}

// note: this accumulates into dst, so the caller is expected to zero-initialize dst->m beforehand.
__device__ void mat4_mul(matrix4* dst, const matrix4* m0, const matrix4* m1)
{
	int row;
	int col;
	int i;
	for (row = 0; row < 4; row++)
		for (col = 0; col < 4; col++)
			for (i = 0; i < 4; i++)
				dst->m[row * 4 + col] += m0->m[row * 4 + i] * m1->m[i * 4 + col];
}

__device__ void mat4_mulvec3_as_mat3(float3* dst, const matrix4* m, const float3* v)
{
	float new_x;
	float new_y;
	float new_z;
	new_x = v->x*m->m[0 + 4 * 0] + v->y*m->m[0 + 4 * 1] + v->z*m->m[0 + 4 * 2];
	new_y = v->x*m->m[1 + 4 * 0] + v->y*m->m[1 + 4 * 1] + v->z*m->m[1 + 4 * 2];
	new_z = v->x*m->m[2 + 4 * 0] + v->y*m->m[2 + 4 * 1] + v->z*m->m[2 + 4 * 2];
	dst->x = new_x;
	dst->y = new_y;
	dst->z = new_z;
}

__global__ void MeltingSolidByHeat(float *pTemperature, float *pLHeat, char *pflag, int pnum, float LiquidHeatTh, float meltTemperature, int *numchange)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<pnum && pflag[idx] == TYPESOLID)
	{
		//if( pTemperature[idx]>meltTemperature )
		if (pLHeat[idx]>LiquidHeatTh)
		{
			pflag[idx] = TYPEFLUID;
			pLHeat[idx] = LiquidHeatTh;
			atomicAdd(&numchange[0],
1); } } } __global__ void FreezingSolidByHeat(float3* ppos, float *pLHeat, char *pflag, int pnum, int *numchange, uint *gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPEFLUID) { //if( pTemperature[idx]>meltTemperature ) if (pLHeat[idx]<0) { //determine a new position which is appropriate for solid. //找距离最近的固体粒子 int i, j, k; float3 ipos = ppos[idx]; getijkfrompos(i, j, k, ipos); float mindis = 1000; int minidx = -1; int width = 1; int cntsolid = 0; float h = dparam.cellsize.x; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) { int gidx = getidx(i + di, j + dj, k + dk); uint start = gridstart[gidx]; if (start == CELL_UNDEF) continue; for (int p = start; p<gridend[gidx]; p++) { if (pflag[p] == TYPESOLID) { float dis = length(ppos[p] - ipos); if (dis< h) cntsolid++; if (length(ppos[p] - ipos)<mindis) mindis = length(ppos[p] - ipos), minidx = p; } } } if (minidx != -1 && mindis<dparam.cellsize.x && cntsolid>2)//周围有固体粒子 { pflag[idx] = TYPESOLID; pLHeat[idx] = 0; atomicAdd(&numchange[0], 1); if (mindis > dparam.samplespace) { ipos = normalize(ipos - ppos[minidx])*dparam.samplespace + ppos[minidx]; ppos[idx] = ipos; } } } } } //计算air solo particle与流体场之间的drag force,直接在本函数里修改了速度。以dragparam为影响大小的参数。 __global__ void calDragForce(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray ux, farray uy, farray uz, float dragparamsolo, float dragparamgrid, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIRSOLO) return; float3 ipos = ppos[idx], ivel = pvel[idx]; //compute the grid index int i, j, k; getijkfrompos(i, j, k, ipos); //compute drag "force" (actually not "force", is velocity change, tuning alpha is very important) float3 gridvel = getParticleVelFromGrid(ipos, ux, uy, uz); float3 gridpos = make_float3(i, j, k); float3 dragf_b = dragparamsolo * length(gridvel - ivel) * (gridvel - ivel); //指向grid's velocity,施加给bubble的,质量被系统归一成1 /* float alpha = 0.5f;*/ float3 velChange_g = -dragf_b*dragparamgrid*dparam.dt; //施加给网格的,要增加一个比例系数,因为同样受力的情况下,网格的质量大,速度改变要小一些 //update for grid float ux0, ux1, uy0, uy1, uz0, uz1; float3 weight = ipos / dparam.cellsize.x - gridpos; //权重 in [0-1] ux0 = velChange_g.x*(1 - weight.x), ux1 = velChange_g.x*weight.x; uy0 = velChange_g.y*(1 - weight.y), uy1 = velChange_g.y*weight.y; uz0 = velChange_g.z*(1 - weight.z), uz1 = velChange_g.z*weight.z; atomicAdd(&(ux.data[getidx(i, j, k, NX + 1, NY, NZ)]), ux0); atomicAdd(&(ux.data[getidx(i + 1, j, k, NX + 1, NY, NZ)]), ux1); atomicAdd(&(uy.data[getidx(i, j, k, NX, NY + 1, NZ)]), uy0); atomicAdd(&(uy.data[getidx(i, j + 1, k, NX, NY + 1, NZ)]), uy1); atomicAdd(&(uz.data[getidx(i, j, k, NX, NY, NZ + 1)]), uz0); atomicAdd(&(uz.data[getidx(i, j, k + 1, NX, NY, NZ + 1)]), uz1); //update for particle,注意是需要反向的。todo:只在Interaction场景里用? if (scene == SCENE_INTERACTION || scene == SCENE_INTERACTION_HIGHRES) pvel[idx] += dragf_b*dparam.dt; } } __global__ void accumulate_GPU_k(int num, float3* out, float3* a)//dsum, a.data, flag, n { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? 
make_float3(0, 0, 0) : a[i]; //赋值给solidparticles __syncthreads(); for (int s = blockDim.x / 2; s>0; s >>= 1) { if (tid<s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k(int num, float3* out, float3* a, float* b)//dsum, a.data, flag, n { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? make_float3(0, 0, 0) : a[i]*b[i]; //赋值给solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k(int num, float3* out, float3* a, float3* b) { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? make_float3(0, 0, 0) : a[i]*b[i]; //赋值给solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k_float(int num, float* out, float* a)//dsum, a.data, flag, n { extern __shared__ float fddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; fddata[tid] = (i >= num) ? 0 : a[i]; //赋值给solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) fddata[tid] += fddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = fddata[0]; } __global__ void compute_cI_k(int pnum, char* parflag, float3 *parPos, float3 *parVel, float3* c, float3* weight, float3 rg) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPESOLID) { float dis = length(parPos[idx] - rg); if (dis>1e-6) { c[idx] = cross(parPos[idx] - rg, parVel[idx]); weight[idx] = make_float3(dis, 0, 0); } else c[idx] = weight[idx] = make_float3(0); } else { c[idx] = weight[idx] = make_float3(0); //c[idx] = make_float3(0,0,0); } } } __global__ void setVelZeroSolid_k(float3 *parvel, char *parflag, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) parvel[idx] = make_float3(0); } __global__ void computeVelSolid_k(float3* parPos, char* parflag, float3* parVel, int pnum, float3 rg, float3 R, float3 T) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) { float3 v_half = cross(R, parPos[idx] - rg); //粒子的角速度` v_half += T; //固体粒子的总速度 v_half = 0.5*(parVel[idx] + v_half); parVel[idx] = v_half; // parVel[idx] = make_float3(0); } } __device__ inline float3 transposeParticle(float3 p, matrix3x3 rm) { float3 res; res.x = p.x*rm.x00 + p.y*rm.x10 + p.z*rm.x20; res.y = p.x*rm.x01 + p.y*rm.x11 + p.z*rm.x21; res.z = p.x*rm.x02 + p.y*rm.x12 + p.z*rm.x22; return res; } //由rotation matrix "rm"来计算各粒子的位置 __global__ void computePosSolid_k(float3* parvel, float3* parPos, char* parflag, int pnum, float3 rg, float3 rg0, matrix3x3 rm) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) { float3 transp = parPos[idx] - rg0; transp = transposeParticle(transp, rm); parPos[idx] = transp + rg; //if (length(parPos[idx])<10.5) //parPos[idx] -= parvel[idx] * 0.00001; } } __global__ void computeSolidVertex_k(float3* vertexpos, int vnum, float3 rg, float3 rg0, matrix3x3 rm) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<vnum) { float3 transp = 
vertexpos[idx] - rg0; transp = transposeParticle(transp, rm); vertexpos[idx] = transp + rg; } } __global__ void set_nonsolid_2_zero(char* pflag, int pnum, float3* Pos, float3* Vel) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] != TYPESOLID) { Pos[idx] = make_float3(0, 0, 0); Vel[idx] = make_float3(0, 0, 0); //Mass[idx] = 0.; } } //在粒子层面处理fluid, air, airsolo粒子与solid的碰撞关系,保证不会穿过边界到solid的内部。 __global__ void CollisionWithSolid_k(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray phisolid, farray sux, farray suy, farray suz, SCENE scene, float bounceVelParam, float bouncePosParam) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float iphi = getScaleFromFrid(ipos, phisolid); if (iphi <= 0.5f) //靠近固体,距离只有半个格子 { float3 svel = getParticleVelFromGrid(ipos, sux, suy, suz); float3 rvel = ivel - svel; float d = dparam.cellsize.x * 0.5f; float3 phigrad; phigrad.x = getScaleFromFrid(ipos + make_float3(d, 0, 0), phisolid) - getScaleFromFrid(ipos - make_float3(d, 0, 0), phisolid); phigrad.y = getScaleFromFrid(ipos + make_float3(0, d, 0), phisolid) - getScaleFromFrid(ipos - make_float3(0, d, 0), phisolid); phigrad.z = getScaleFromFrid(ipos + make_float3(0, 0, d), phisolid) - getScaleFromFrid(ipos - make_float3(0, 0, d), phisolid); if (length(phigrad) > 0) { phigrad = normalize(phigrad); //指向外侧 if (dot(rvel, phigrad)<0 || scene == SCENE_FREEZING) //相对速度指向内侧 { ivel -= bounceVelParam * dot(rvel, phigrad)*phigrad; //法向速度置为与固体一样 if (scene == SCENE_FREEZING) ivel -= 0.1f* (rvel - dot(rvel, phigrad)*phigrad); //切向速度 } ipos += bouncePosParam * phigrad * (0.5f - iphi) * dparam.cellsize.x; } } //并根据新的速度更新位置 ipos += ivel*dparam.dt; //边界 float rate = 0.5f, ratevel = -0.5f; if (pflag[idx] == TYPEAIRSOLO) rate = 0.8f, ratevel = -0.5f; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(rate*dparam.cellsize.x)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(rate*dparam.cellsize.x)); // if( ipos.x>tmax.x ) // ivel.x *=ratevel, ipos.x=tmax.x; // if( ipos.x<tmin.x ) // ivel.x *= ratevel, ipos.x=tmin.x; // if( ipos.y>tmax.y ) // ivel.y *=ratevel, ipos.y=tmax.y; // if( ipos.y<tmin.y ) // ivel.y *= ratevel, ipos.y=tmin.y; // if( ipos.z>tmax.z ) // ivel.z *=ratevel, ipos.z=tmax.z; // if( ipos.z<tmin.z ) // ivel.z *= ratevel, ipos.z=tmin.z; if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //存储新的速度和位置 pvel[idx] = ivel; ppos[idx] = ipos; } } //专门为melting and freezing场景写的,粒度要更细一些。在粒子层面处理fluid, air, airsolo粒子与solid的碰撞关系,保证不会穿过边界到solid的内部。 __global__ void CollisionWithSolid_Freezing(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray phisolid, uint* gridstart, uint* gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; int i, j, k; getijkfrompos(i, j, k, ipos); float iphi = getScaleFromFrid(ipos, phisolid); if (iphi <= 1.0f) //有发生碰撞的可能,再进行检测 { float r = 0.25f*dparam.cellsize.x; float3 collisionpos = make_float3(0), dir; float depth = 0, dis, adhesionDis = 0; int cntcollide = 0, cntadhesion = 0; float h = 4 * r; for (int di = -1; di <= 1; di++)for (int dj = -1; dj <= 
1; dj++)for (int dk = -1; dk <= 1; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { int grididx = getidx(i + di, j + dj, k + dk); int start = gridstart[grididx]; if (start == CELL_UNDEF) continue; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<2 * r) //碰撞 { collisionpos += ppos[p]; depth = max(depth, 2 * r - dis); cntcollide++; } else if (dis< h) { adhesionDis += dis; cntadhesion++; } } } } float3 n; float d = dparam.cellsize.x * 0.5f; n.x = getScaleFromFrid(ipos + make_float3(d, 0, 0), phisolid) - getScaleFromFrid(ipos - make_float3(d, 0, 0), phisolid); n.y = getScaleFromFrid(ipos + make_float3(0, d, 0), phisolid) - getScaleFromFrid(ipos - make_float3(0, d, 0), phisolid); n.z = getScaleFromFrid(ipos + make_float3(0, 0, d), phisolid) - getScaleFromFrid(ipos - make_float3(0, 0, d), phisolid); float3 originalvel = ivel; if (length(n) > 0) { n = normalize(n); //指向外侧 if (cntcollide>0) //发生碰撞 { collisionpos /= cntcollide; if (length(n) > 0) { //correct vel and pos; ivel -= dot(originalvel, n)*n; //法向速度置为与固体一样 //ivel *= 1.1f; ipos += depth * n; } } else if (cntadhesion>0) //有一定的吸引力 { float alpha = 0.1f; ivel -= n * alpha * length(ivel); } } } //并根据新的速度更新位置 //边界 float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); if (ipos.x>tmax.x) ivel.x *= -0.5f, ipos.x = tmax.x; if (ipos.x<tmin.x) ivel.x *= -0.5f, ipos.x = tmin.x; if (ipos.y>tmax.y) ivel.y *= -0.5f, ipos.y = tmax.y; if (ipos.y<tmin.y) ivel.y *= -0.5f, ipos.y = tmin.y; if (ipos.z>tmax.z) ivel.z *= -0.5f, ipos.z = tmax.z; if (ipos.z<tmin.z) ivel.z *= -0.5f, ipos.z = tmin.z; ipos += ivel*dparam.dt; //存储新的速度和位置 pvel[idx] = ivel; ppos[idx] = ipos; } } __global__ void buoyancyForSolid(float3 *ppos, float3 *pvel, char *pflag, int pnum, uint *gridstart, uint *gridend, float SolidBuoyanceParam) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { int cnt = 0; int i, j, k; float3 ipos = ppos[idx]; getijkfrompos(i, j, k, ipos); float r = dparam.cellsize.x; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { int gidx = getidx(i + di, j + dj, k + dk); uint start = gridstart[gidx]; if (start != CELL_UNDEF) { for (uint p = start; p<gridend[gidx]; p++) if (pflag[p] == TYPEFLUID && length(ppos[p] - ipos)<r) cnt++; } } } if (cnt>2) pvel[idx].z += (dparam.waterrho - dparam.solidrho) * SolidBuoyanceParam * dparam.dt; } } __global__ void solidCollisionWithBound(float3 *ppos, float3 *pvel, char *pflag, int pnum, float SolidbounceParam, int nSolPoint) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { //check position float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; //float eps=1e-6; // 反向的速度与“穿透深度,系数,粒子个数”相关。 //(与粒子个数相关主要是因为这个速度是起到“惩罚力”的作用,而粒子个数起到“质量”的作用,在粒子的速度向刚体转换的时候,相当于一个“平均(除质量)”的过程) if (ipos.x<tmin.x) ivel.x += (tmin.x - ipos.x) * SolidbounceParam * nSolPoint; if (ipos.x>tmax.x) ivel.x -= (ipos.x - tmax.x) * SolidbounceParam * nSolPoint; if (ipos.y<tmin.y) ivel.y += (tmin.y - ipos.y) * SolidbounceParam * nSolPoint; if (ipos.y>tmax.y) ivel.y -= (ipos.y - tmax.y) * SolidbounceParam * nSolPoint; if (ipos.z<tmin.z) ivel.z += 
(tmin.z - ipos.z) * SolidbounceParam * nSolPoint; if (ipos.z>tmax.z) ivel.z -= (ipos.z - tmax.z) * SolidbounceParam * nSolPoint; pvel[idx] = ivel; //ppos[idx]=ipos; //不能修改位置,刚体会变形 } } //there is a problem here, remember to solve it. // __global__ void genAirFromSolid_k( float3 *ppos, float3 *pvel, char *pflag, float *psolubility, float *paircontain, float *pmass, float *pTemperature,int pnum, // charray lsmark, farray phisolid, farray Tgrid, int *addnum, float *randfloat, int nrandnum, int frame ) // { // int idx=__mul24( blockIdx.x, blockDim.x )+threadIdx.x; // if( idx<dparam.gnum &&lsmark[idx]==TYPEFLUID && phisolid[idx]>0 ) //此格子是流体格子 // { // int i,j,k; // getijk( i,j,k,idx); // bool flag=false; // for( int di=-1; di<=1; di++ ) for( int dj=-1; dj<=1; dj++ ) for( int dk=-1; dk<=1; dk++ ) // { // if(verifycellidx(i+di,j+dj,k+dk) && phisolid( i+di,j+dj,k+dk)<0 ) // flag=true; // } // if( !flag ) // return; // // int cnt= (idx*frame) % ( nrandnum-100 ); // if( randfloat[cnt++]>0.95 ) //if randnum>thresold, generate a airsolo bubble // { // int addidx=atomicAdd( addnum, 1 ); // float3 addpos= (make_float3(randfloat[cnt], randfloat[cnt], randfloat[cnt]) + make_float3(i,j,k) ) * dparam.cellsize.x; // ppos[pnum+addidx] = addpos; // pvel[pnum+addidx]=make_float3(0); // pflag[pnum+addidx]=TYPEAIRSOLO; // psolubility[pnum+addidx]=0; // paircontain[pnum+addidx]=0; // pmass[pnum+addidx]=dparam.airm0; // pTemperature[pnum+addidx]=getScaleFromFrid( addpos, Tgrid ); // } // } // } //这个函数是考虑latent heat的主函数,当温度超过界限时(如固体的温度高于熔点),则多余的热量放到latent heat里;当latent heat满足一定条件时,发生phase change. __global__ void updateLatentHeat_k(float *parTemperature, float *parLHeat, char *partype, int pnum, float meltingpoint, float boilingpoint, float LiquidHeatTh) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (partype[idx] == TYPESOLID && parTemperature[idx]>meltingpoint) { parLHeat[idx] += parTemperature[idx] - meltingpoint; parTemperature[idx] = meltingpoint; } if (partype[idx] == TYPEFLUID) { if (parTemperature[idx]<meltingpoint) { parLHeat[idx] -= meltingpoint - parTemperature[idx]; parTemperature[idx] = meltingpoint; } else if (parTemperature[idx]>boilingpoint) { parLHeat[idx] += parTemperature[idx] - boilingpoint; // parLHeat[idx] = min( parLHeat[idx], LiquidHeatTh+5 ); parTemperature[idx] = boilingpoint; } else parLHeat[idx] = LiquidHeatTh; } } } __global__ void pouringwater(float3* pos, float3* vel, float* parmass, char* parflag, float *ptemperature, float *pLHeat, float *pGasContain, int parnum, float3 *ppourpos, float3 *ppourvel, char pourflag, int pournum, float *randfloat, int randnum, int frame, float posrandparam, float velrandparam, float defaultLiquidT, float LiquidHeatTh) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pournum) { //速度与位置的随机化 int randbase = (frame + idx) % (randnum - 6); float3 randvel = make_float3(randfloat[randbase], randfloat[randbase + 1], randfloat[randbase + 2]) *2.0f - 1.0f; randbase += 3; float3 randpos = make_float3(randfloat[randbase], randfloat[randbase + 1], randfloat[randbase + 2]) *2.0f - 1.0f; pos[parnum + idx] = ppourpos[idx] + randpos * posrandparam*dparam.samplespace; vel[parnum + idx] = ppourvel[idx] + randvel * velrandparam; parmass[parnum + idx] = dparam.m0; parflag[parnum + idx] = pourflag; ptemperature[parnum + idx] = defaultLiquidT; pLHeat[parnum + idx] = LiquidHeatTh; pGasContain[parnum + idx] = 0; } } inline __device__ float getlen(float x, float y) { return sqrt(x*x + y*y); } __global__ void initheat_grid_k(farray tp, 
charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float x = i, z = k; float r = NX*0.15; if (getlen(x - NX / 4, z - NZ / 4) <= r) tp[idx] = 100, mark[idx] = TYPESOLID; else if (getlen(x - NX / 4 * 3, z - NZ / 4 * 3) <= r) tp[idx] = 0, mark[idx] = TYPEFLUID; else if (z<NZ / 2) tp[idx] = 20, mark[idx] = TYPEVACUUM; else tp[idx] = 80, mark[idx] = TYPEAIR; } } __global__ void set_softparticle_position(float3* solidParPos, float3* mParPos, float3* solidParVelFLIP,float3* mParVel, char* partype) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) if (partype[idx]==TYPESOLID) { mParPos[idx] = solidParPos[idx]; mParVel[idx] = (solidParVelFLIP[idx]+mParVel[idx])/2.0; // mParVel[idx] = solidParVelFLIP[idx]; } }; //****************************************LBM algorithm***************************** __global__ void initLBMfield_k(farray ux, farray uy,farray uz, charray mark, farray f0, farray rho0) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) //for(int idx=0;idx<dparam.gnum;idx++) { int i, j, k; getijk(i, j, k, idx); if (mark[idx] ==TYPEBOUNDARY || mark[idx]==TYPEVACUUM ) { rho0.data[idx] = 0.; } else rho0.data[idx] = 1; for(int Qm = 0; Qm < 19; Qm++) { f0(i,j,k,Qm) = LBMfeq(make_float3(ux(i, j, k), uy(i, j, k), uz(i, j, k)), LBM_dparam.omega, rho0(idx), make_int3(LBM_dparam.vel_i[Qm].x, LBM_dparam.vel_i[Qm].y, LBM_dparam.vel_i[Qm].z), LBM_dparam.R*LBM_dparam.LBM_T0); //if(f0(i,j,k,Qm))printf("(%d,%d,%d)-%f\n", i, j, k, f0(i, j, k,Qm)); } } } __global__ void deriveLBMquantities_k(farray ux, farray uy, farray uz, charray mark, farray f, farray rho) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) // for(idx=0;idx<dparam.gnum;idx++) {// calculate average density //if (mark[idx] == TYPEVACUUM || mark[idx] == TYPEBOUNDARY) // return; rho.data[idx] = 0; int i,j,k; getijk(i,j,k,idx); for ( int Qm= 0; Qm < 19; Qm++) { rho(i,j,k) += f(i,j,k,Qm); } // calculate average velocity u ux.data[idx] = 0; uy.data[idx] = 0; uz.data[idx] = 0; if (rho.data[idx] > 0) { for (int Qm = 0; Qm < 19; Qm++) { ux.data[idx] += f(i,j,k,Qm) * LBM_dparam.vel_i[Qm].x; uy.data[idx] += f(i,j,k,Qm) * LBM_dparam.vel_i[Qm].y; uz.data[idx] += f(i,j,k,Qm) * LBM_dparam.vel_i[Qm].z; } float s = 1 / rho.data[idx]; ux.data[idx] *= s; uy.data[idx] *= s; uz.data[idx] *= s; } // rescale in case maximum velocity is exceeded float n = Vec3_Norm(make_float3(ux(idx), uy(idx), uz(idx))); if (n > v_max) { ux.data[idx] *= v_max / n; uy.data[idx] *= v_max / n; uz.data[idx] *= v_max / n; } } } __global__ void CalcLBMcollision_k(farray ux, farray uy, farray uz, charray mark, farray df, farray dF, farray rho) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; //for(int idx=0;idx<dparam.gnum;idx++) if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (mark[idx] == TYPEBOUNDARY || mark[idx] == TYPEVACUUM) return; for (int Qm = 0; Qm < 19; Qm++) { // calculate equilibrium distribution function float f_eq; f_eq = LBMfeq(make_float3(ux(i, j, k), uy(i, j, k), uz(i, j, k)), LBM_dparam.omega, rho(idx), make_int3(LBM_dparam.vel_i[Qm].x, LBM_dparam.vel_i[Qm].y, LBM_dparam.vel_i[Qm].z), LBM_dparam.R*LBM_dparam.LBM_T0); // perform collision dF(i, j, k, Qm) = (1 - LBM_dparam.omega)*df(i, j, k, Qm) + LBM_dparam.omega * f_eq; //gravity 在粒子上施加重力了 这里暂不需要再加 dF(i, j, k, Qm) += rho(idx) * LBM_dparam.weight[Qm] * dot(LBM_dparam.vel_i[Qm], make_float3(0,0,-0.1));//gravity==-0.1向下 df(i, 
j, k, Qm) = dF(i,j,k,Qm); } } } __global__ void LBMStream_k(farray ux, farray uy, farray uz, charray mark, farray df_stream, farray dF, farray rho,float *mass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; int neighidx; int ii, jj, kk;//after stream getijk(i, j, k, idx); if (mark[idx] == TYPEBOUNDARY || mark[idx] == TYPEVACUUM) return; // copy distribution function corresponding to velocity zero df_stream(i, j, k, 0) = dF(i, j, k, 0); float df_neigh[19] = { 0 }; if (mark[idx] & (TYPEFLUID | TYPESOLID)) for (int Qm = 1; Qm < 19; Qm++)//omite zero vector { ii = i - LBM_dparam.vel_i[Qm].x; jj = j - LBM_dparam.vel_i[Qm].y; kk = k - LBM_dparam.vel_i[Qm].z; neighidx = getidx(ii, jj, kk); df_neigh[Qm] = dF(ii,jj,kk,Qm); // fluid cell must not be adjacent to an empty cell assert((mark[neighidx] & TYPEVACUUM) == 0); if (mark[neighidx] & (TYPEFLUID | TYPESURFACE |TYPESOLID)) { // mass exchange with fluid or interface cell, Eq. (4.2) mass[idx] += (df_neigh[Qm] - dF(i,j,k,LBM_dparam.invVel_i[Qm])); // standard streaming, Eq. (3.1) df_stream(i, j, k, Qm) = df_neigh[Qm]; } else //type & CT_OBSTACLE df_stream(i, j, k, Qm) = dF[i,j,k,LBM_dparam.invVel_i[Qm]];// reflect density functions, Eq. (3.5) } else if (mark[idx] & TYPESURFACE) { const float epsilon = CalcEpsilon(mark[idx], rho[idx], mass[idx]); // calculate atmospheric equilibrium distribution function real f_atm_eq[19]; for(int Qm=0;Qm<19;Qm++) f_atm_eq[Qm] = LBMfeq(make_float3(ux(i, j, k), uy(i, j, k), uz(i, j, k)), LBM_dparam.omega, 1.0,//rhoA make_int3(LBM_dparam.vel_i[Qm].x, LBM_dparam.vel_i[Qm].y, LBM_dparam.vel_i[Qm].z), LBM_dparam.R*LBM_dparam.LBM_T0); for (int Qm = 1; Qm < 19; Qm++)// omit zero vector { df_neigh[Qm] = dF(i - LBM_dparam.vel_i[Qm].x, j - LBM_dparam.vel_i[Qm].y, k - LBM_dparam.vel_i[Qm].z, Qm); if (mark[neighidx] & TYPEFLUID) { // mass exchange between fluid and interface cell, Eq. (4.2) mass[idx] += (df_neigh[Qm] - dF(i, j, k, LBM_dparam.invVel_i[Qm])); // standard streaming, Eq. (3.1) df_stream(i, j, k, Qm) = df_neigh[Qm]; } else if (mark[neighidx] & TYPESURFACE) { const float eps_neigh = CalcEpsilon(mark[neighidx], rho[neighidx], mass[neighidx]); // mass exchange between two interface cells, Eq. (4.3) mass[idx] += CalcMassExchange(mark[idx], mark[neighidx], df_neigh[Qm], dF[i,j,k,LBM_dparam.invVel_i[Qm]])*0.5*(eps_neigh + epsilon); // standard streaming, Eq. (3.1) df_stream(i, j, k, Qm) = df_neigh[Qm]; } else if (mark[neighidx] & TYPEVACUUM) // no mass exchange from or to empty cell // reconstructed atmospheric distribution function, Eq. (4.5) df_stream(i, j, k, Qm) = f_atm_eq[Qm] + f_atm_eq[LBM_dparam.invVel_i[Qm]] - dF(i,j,k,LBM_dparam.invVel_i[Qm]); else // df_neigh->type & CT_OBSTACLE { // reflect density functions, Eq. (3.5) df_stream(i, j, k, Qm) = dF(i, j, k, LBM_dparam.invVel_i[Qm]); } } // calculate surface normal const float3 norm = CalcLBMNormal(mark, rho, mass, i, j, k); // always use reconstructed atmospheric distribution function for directions along surface normal; // separate loop to handle mass exchange correctly for (int Qm = 1; Qm < 19; Qm++)//omite zero vector { if (dot(norm, LBM_dparam.vel_i[LBM_dparam.invVel_i[Qm]]) > 0) // Eq.4.6 // reconstructed atmospheric distribution function, Eq. 
(4.5) df_stream(i, j, k, Qm) = f_atm_eq[Qm] + f_atm_eq[LBM_dparam.invVel_i[Qm]] - dF(i, j, k, LBM_dparam.invVel_i[Qm]); } }// df->type & TYPESURFACE for (int Qm = 0; Qm < 19; Qm++) dF(i, j, k, Qm) = df_stream(i, j, k, Qm); } } __global__ void initLBMmass_k(charray mark, float* dgmass, farray rho) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { if (mark[idx] == TYPEFLUID || mark[idx]== TYPESOLID) dgmass[idx] = rho[idx]; else if (mark[idx] == TYPESURFACE) dgmass[idx] = rho[idx] * 0.5; else dgmass[idx] = 0; } } __global__ void LBMFluidmass2rho_k(charray mark, farray rho, float* mass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { if (mark[idx] & (TYPEFLUID | TYPESOLID)) { assert(fabs(rho[idx] / mass[idx] - 1) < 5e-6); rho[idx] = mass[idx]; } } } __global__ void LBMUpdateType1_k(charray mark,charray oldmark, float3 * oldnorm, farray df, farray df_next, farray rho, float* mass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); //copy typies oldmark[idx] = mark[idx]; oldnorm[idx] = CalcLBMNormal(mark, rho, mass, i, j, k);//4里面用到 // current cell //for (int Qm = 0; Qm < 19; Qm++) // if(df_next(i, j, k, Qm) != df(i, j, k, Qm))printf("!!!!!!!!!!!!!!!!!!!!"); // check whether interface cells emptied or filled if (mark[idx] & TYPESURFACE) { // Eq. (4.7), and remove interface cell artifacts if ( (mass[idx] > (1 + FILL_OFFSET)*rho[idx]) || (mass[idx] >= (1 - LONELY_THRESH)*rho[idx] && (mark[idx] & TYPENOEMPTYMEIGH))) //surface to fluid cell mark[idx] = TYPE_IF_TO_FLUID; else if ( (mass[idx] < -FILL_OFFSET*rho[idx]) || ((mass[idx] <= LONELY_THRESH*rho[idx]) && (mark[idx] & TYPENOFLUIDNEIGH)) || ((mark[idx] & TYPENOIFACENEIGH) && (mark[idx] & TYPENOFLUIDNEIGH))) // interface to empty cell mark[idx] = TYPE_IF_TO_EMPTY; } // clear neighborhood flags (will be determined later) mark[idx] &= ~(TYPENOFLUIDNEIGH | TYPENOIFACENEIGH | TYPENOIFACENEIGH); } } __global__ void LBMUpdateType2_k(charray mark, charray oldmark, farray df, farray df_next, farray rho,farray tmprho, float* mass, farray ux,farray uy,farray uz, farray tmpux, farray tmpuy, farray tmpuz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); float df_neigh[19] = { 0 }; tmprho[idx] = rho[idx]; tmpux[idx] = ux[idx]; tmpuy[idx] = uy[idx]; tmpuz[idx] = uz[idx]; // set flags for filled interface cells (interface to fluid) if (mark[idx] & TYPE_IF_TO_FLUID) { // keep flag 'CT_IF_TO_FLUID' for later excess mass distribution // convert neighboring empty cells to interface cells for (int Qm = 1; Qm < 19; Qm++)// omit zero vector { df(i, j, k, Qm) = df_next(i, j, k, Qm);//useless? 
int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; df_neigh[Qm] = df_next(i_neigh, j_neigh, k_neigh, Qm); int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPEVACUUM) { mark[idxneig] = TYPESURFACE; // initialize cell with average density and velocity of surrounding cells, using f0//!!!易出错 LBMAverageSurrounding(oldmark, mass, rho[idxneig],tmprho, df_next, ux[idxneig], uy[idxneig],uz[idxneig], tmpux, tmpuy, tmpuz, i_neigh,j_neigh,k_neigh,Qm); //charray mark, float* mass, float &rho, farray tmprho, farray df, float &ux, float &uy, float &uz, farray tmpux, farray tmpuy, farray tmpuz, int i, int j, int k } } // prevent neighboring cells from becoming empty for (int Qm = 1; Qm < 19; Qm++)// omit zero vector { //neighbor cell int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; //df_neigh[Qm] = df_next(i_neigh, j_neigh, k_neigh, Qm); int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPE_IF_TO_EMPTY) mark[idxneig] = TYPESURFACE; //df(i, j, k, Qm) = df_next(i, j, k, Qm) } } } } __global__ void LBMUpdateType3_k(charray mark, farray df, farray df_next, farray rho, float* mass) { // set flags for emptied interface cells (interface to empty) int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); if (mark[idx] & TYPE_IF_TO_EMPTY) // keep flag 'CT_IF_TO_EMPTY' for later excess mass distribution // convert neighboring fluid cells to interface cells for (int Qm = 1; Qm < 19; Qm++)//omit zero vector { //neighbor cell int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPEFLUID) mark[idxneig] = TYPESURFACE; } } } __global__ void LBMUpdateType4_k(charray mark, charray markold, float3* oldnorm, farray df_distr, farray dF, farray rho, float* mass) { // distribute excess mass int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); // calculate surface normal using 'f0', such that excess mass distribution is independent of the filled cell ordering float3 norm = oldnorm[idx]; //excess mass float mex; if (mark[idx] & TYPE_IF_TO_FLUID) { mex = mass[idx] - rho[idx]; // after excess mass has been distributed, remaining mass equals density mass[idx] = rho[idx]; } else if (mark[idx] & TYPE_IF_TO_EMPTY) { mex = mass[idx]; //flip sign of noraml; norm.x = -norm.x; norm.y = -norm.y; norm.z = -norm.z; // after negative excess mass has been distributed, remaining mass is zero mass[idx] = 0; } else return; // Eq. 
(4.9) float eta[19] = { 0 }; float eta_total = 0; unsigned int isIF[19] = { 0 }; unsigned int numIF = 0;// number of interface cell neighbors for (int Qm = 0; Qm < 19; Qm++) df_distr(i, j, k, Qm) = 0; for (int Qm = 1; Qm < 19; Qm++)//omit zero vector { // neighbor cell in the direction of velocity vector int i_neigh = i + LBM_dparam.vel_i[Qm].x; int j_neigh = j + LBM_dparam.vel_i[Qm].y; int k_neigh = k + LBM_dparam.vel_i[Qm].z; int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPESURFACE) { eta[Qm] = dot(LBM_dparam.vel_i[Qm], norm); if (eta[Qm] < 0) eta[Qm] = 0; eta_total += eta[Qm]; isIF[Qm] = 1; numIF++; } // store excess mass to be distributed in 'f_distr'; // don't actually distribute yet to ensure independence of cell traversal order // cell for excess mass distribution, store in distribution functions if (eta_total > 0) { float eta_fac = 1 / eta_total; for (int Qm = 1; Qm < 19; Qm++)//omit zere vector { // eta[i] is zero for non-interface cells df_distr(i, j, k, Qm) = mex*eta[Qm] * eta_fac; } } else if (numIF > 0) { // distribute uniformly float mex_rel = mex / numIF; for (int Qm = 1; Qm < 19; Qm++)//omit zere vector { df_distr(i, j, k, Qm) = (isIF[Qm] ? mex_rel : 0); //df } } // else, excess mass cannot be distributed, i.e., has leaked // dF(i, j, k, Qm) = df_distr(i, j, k, Qm); } } } __global__ void LBMUpdateType5_k(charray mark, farray df, farray dF, farray rho, float* mass) { //// collect distributed mass and finalize cell flags int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { getijk(i, j, k, idx); if (mark[idx] & TYPESURFACE) for (int Qm = 1; Qm < 19; Qm++) { // neighbor cell in the direction of velocity vector int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; int idxneig = getidx(i_neigh, j_neigh, k_neigh); mass[idx] += df(i_neigh,j_neigh,k_neigh,Qm); } else if (mark[idx] & TYPE_IF_TO_FLUID) mark[idx] = TYPEFLUID; else if (mark[idx] & TYPE_IF_TO_EMPTY) mark[idx] = TYPEVACUUM; // assert((mark[idx] & (CT_OBSTACLE | CT_FLUID | CT_INTERFACE | CT_EMPTY)) != 0); // assert((mark[idx] & ~(CT_OBSTACLE | CT_FLUID | CT_INTERFACE | CT_EMPTY)) == 0); } } __global__ void LBMUpdateType6_k(charray mark, farray df, farray df_next, farray rho, float* mass) { // set cell neighborhood flags int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; if (idx < dparam.gnum) { // ignore obstacle cells if (mark[idx] & TYPEBOUNDARY) return; // set "no ... neighbor" flags mark[idx] |= (TYPENOEMPTYMEIGH | TYPENOFLUIDNEIGH | TYPENOIFACENEIGH); for(int Qm = 1; Qm < 19; Qm++)//omit zere vector { // neighbor cell in the direction of velocity vector int i_neigh = i - LBM_dparam.vel_i[Qm].x; int j_neigh = j - LBM_dparam.vel_i[Qm].y; int k_neigh = k - LBM_dparam.vel_i[Qm].z; int idxneig = getidx(i_neigh, j_neigh, k_neigh); if (mark[idxneig] & TYPEFLUID) //remove "no fluid neighbor" flag mark[idx] &= ~TYPENOFLUIDNEIGH; else if (mark[idxneig] & TYPEVACUUM) // remove "no empty neighbor" flag mark[idx] &= ~TYPENOEMPTYMEIGH; else if (mark[idxneig] & TYPESURFACE) // remove "no interface neighbor" flag mark[idx] &= ~TYPENOIFACENEIGH; } // both flags should not be set simultaneously if (mark[idx] & TYPENOEMPTYMEIGH) mark[idx] &= ~TYPENOFLUIDNEIGH; } } //***********************************************************************************/
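The LBM kernels above (initLBMfield_k, CalcLBMcollision_k, LBMStream_k) all call LBMfeq(...), whose definition is not part of this excerpt. For reference only, the textbook isothermal D3Q19 equilibrium distribution is sketched below; it may differ from the LBMfeq actually used here, which also takes omega and R*T0, and the weight w_i and lattice direction e_i are illustrative parameters.

// Textbook D3Q19 equilibrium (illustrative sketch, not the file's LBMfeq):
//   f_i^eq = w_i * rho * (1 + 3 (e_i . u) + 4.5 (e_i . u)^2 - 1.5 (u . u))
__device__ inline float d3q19_feq(float w_i, float rho, float3 e_i, float3 u)
{
    float eu = e_i.x * u.x + e_i.y * u.y + e_i.z * u.z;   // e_i . u
    float uu = u.x * u.x + u.y * u.y + u.z * u.z;          // |u|^2
    return w_i * rho * (1.0f + 3.0f * eu + 4.5f * eu * eu - 1.5f * uu);
}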
0fe00002a87881582b7d6a14fdb529b0f2ea5300.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
0fe00002a87881582b7d6a14fdb529b0f2ea5300.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
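The .hip and .cu instantiations above differ only in the hipify banner and in the stream type of the explicit template instantiation (hipStream_t versus cudaStream_t). A minimal, hypothetical portability sketch of that single mapping, not MegDNN's actual build setup, would be:

// Hypothetical backend-neutral alias (sketch only; the generator emits two files instead):
#if defined(__HIP_PLATFORM_HCC__)
#include <hip/hip_runtime.h>
using gpuStream_t = hipStream_t;
#else
#include <cuda_runtime.h>
using gpuStream_t = cudaStream_t;
#endif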
e3cc5d63d125d5a829f0128594dbd0916f3c7155.hip
// !!! This is a file automatically generated by hipify!!! //Includes for IntelliSense #define _SIZE_T_DEFINED #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <math.h> #include "../NeuralNetwork/Activation/ActivationFunction.cu" extern "C" { typedef enum MyBackPropMethod { SGD = 0, RMSProp = 1, } MyBackPropMethod; __device__ float Clip(float value, float clip) { return (clip == 0) * value + (clip != 0) * ((value > clip) * clip + (value < -clip) * -clip + (value >= -clip && value <= clip) * value); /* avoids thread divergence, equivalent to: if (clip == 0) return value; else if (value > clip) return clip; else if (value < -clip) return -clip; else return value; */ } __device__ void SGDWeightUpdate(float trainingRate, float momentum, float clipGradient, float *weights, float *weightDeltas, int weightId, float gradient) { float weightDelta = trainingRate * Clip(gradient, clipGradient) + momentum * weightDeltas[weightId]; weightDeltas[weightId] = weightDelta; weights[weightId] -= weightDelta; } __device__ void RMSPropWeightUpdate(float trainingRate, float momentum, float smoothingFactor, float clipGradient, float *weights, float *weightDeltas, float *weightMeanSquares, int weightId, float gradient) { float rmsGradient = Clip(gradient, clipGradient) + momentum * weightDeltas[weightId]; weightDeltas[weightId] = rmsGradient; float weightMeanSquare = smoothingFactor * weightMeanSquares[weightId] + (1.0f - smoothingFactor) * rmsGradient * rmsGradient; if (weightMeanSquare != 0) rmsGradient /= sqrtf(weightMeanSquare); weightMeanSquares[weightId] = weightMeanSquare; weights[weightId] -= trainingRate * rmsGradient; } __global__ void LSTMUpdateGateWeightsKernelBPTT( float *inputGateWeights, float *inputGateWeightDeltas, float *inputGateWeightMeanSquares, float *forgetGateWeights, float *forgetGateWeightDeltas, float *forgetGateWeightMeanSquares, float *outputGateWeights, float *outputGateWeightDeltas, float *outputGateWeightMeanSquares, float* outputGateWeightGradient, float* inputGateWeightGradient, float* forgetGateWeightGradient, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerGate = inputCount + previousOutputCount + cellsPerBlock + 1; if (weightId < weightsPerGate * previousOutputCount / cellsPerBlock) { if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, outputGateWeights, outputGateWeightDeltas, outputGateWeightMeanSquares, weightId, outputGateWeightGradient[weightId]); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, inputGateWeights, inputGateWeightDeltas, inputGateWeightMeanSquares, weightId, inputGateWeightGradient[weightId]); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, forgetGateWeights, forgetGateWeightDeltas, forgetGateWeightMeanSquares, weightId, forgetGateWeightGradient[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, outputGateWeights, outputGateWeightDeltas, weightId, outputGateWeightGradient[weightId]); SGDWeightUpdate(trainingRate, momentum, clipGradient, inputGateWeights, inputGateWeightDeltas, weightId, 
inputGateWeightGradient[weightId]); SGDWeightUpdate(trainingRate, momentum, clipGradient, forgetGateWeights, forgetGateWeightDeltas, weightId, forgetGateWeightGradient[weightId]); } } } __global__ void LSTMUpdateCellWeightsKernelBPTT( float *cellInputWeights, float *cellInputWeightDeltas, float *cellInputWeightMeanSquares, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, float *cellInputWeightGradient, int inputCount, int previousOutputCount ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerCell = inputCount + previousOutputCount + 1; int cellStatesCount = previousOutputCount; if (weightId < weightsPerCell * cellStatesCount) { int cellId = weightId / weightsPerCell; if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, cellInputWeights, cellInputWeightDeltas, cellInputWeightMeanSquares, weightId, cellInputWeightGradient[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, cellInputWeights, cellInputWeightDeltas, weightId, cellInputWeightGradient[weightId]); } } } /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /* /* ORIGINAL FROM KAREL */ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ __global__ void LSTMUpdateGateWeightsKernel( float *input, float *previousOutput, float *cellStates, float *cellStateErrors, float *outputGateDeltas, float *inputGateWeights, float *inputGateWeightDeltas, float *inputGateWeightMeanSquares, float *forgetGateWeights, float *forgetGateWeightDeltas, float *forgetGateWeightMeanSquares, float *outputGateWeights, float *outputGateWeightDeltas, float *outputGateWeightMeanSquares, float *inputGateWeightsRTRLPartials, float *forgetGateWeightsRTRLPartials, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerGate = inputCount + previousOutputCount + cellsPerBlock + 1; if (weightId < weightsPerGate * previousOutputCount / cellsPerBlock) { int fromId = weightId % weightsPerGate; int toId = weightId / weightsPerGate; //calculate output gate weight gradient int isFromInputUnit = 
fromId >= 0 && fromId < inputCount; int isFromPreviousOutputUnit = (fromId >= inputCount) && (fromId < inputCount + previousOutputCount); int isPeephole = (fromId >= inputCount + previousOutputCount) && (fromId < inputCount + previousOutputCount + cellsPerBlock); int isFromBiasUnit = fromId == (inputCount + previousOutputCount + cellsPerBlock); float inputFromWeight = isFromInputUnit * input[isFromInputUnit * fromId] + isFromPreviousOutputUnit * previousOutput[isFromPreviousOutputUnit * (fromId - inputCount)] + isPeephole * cellStates[isPeephole * (toId * cellsPerBlock + (fromId - inputCount - previousOutputCount))] + isFromBiasUnit * 1; float outputGateWeightGradient = outputGateDeltas[toId] * inputFromWeight; //calculate input and forget gate weight gradients float inputGateWeightGradient = 0; float forgetGateWeightGradient = 0; //loop through cells for (int cellId = toId * cellsPerBlock; cellId < (toId + 1) * cellsPerBlock; cellId++) { inputGateWeightGradient += cellStateErrors[cellId] * inputGateWeightsRTRLPartials[cellId * weightsPerGate + fromId]; forgetGateWeightGradient += cellStateErrors[cellId] * forgetGateWeightsRTRLPartials[cellId * weightsPerGate + fromId]; } //update gate weights if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, outputGateWeights, outputGateWeightDeltas, outputGateWeightMeanSquares, weightId, outputGateWeightGradient); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, inputGateWeights, inputGateWeightDeltas, inputGateWeightMeanSquares, weightId, inputGateWeightGradient); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, forgetGateWeights, forgetGateWeightDeltas, forgetGateWeightMeanSquares, weightId, forgetGateWeightGradient); } else // SGD { SGDWeightUpdate(trainingRate, momentum, clipGradient, outputGateWeights, outputGateWeightDeltas, weightId, outputGateWeightGradient); SGDWeightUpdate(trainingRate, momentum, clipGradient, inputGateWeights, inputGateWeightDeltas, weightId, inputGateWeightGradient); SGDWeightUpdate(trainingRate, momentum, clipGradient, forgetGateWeights, forgetGateWeightDeltas, weightId, forgetGateWeightGradient); } } } __global__ void LSTMUpdateCellWeightsKernel( float *input, float *previousOutput, float *cellStateErrors, float *cellInputWeights, float *cellInputWeightDeltas, float *cellInputWeightMeanSquares, float *cellWeightsRTRLPartials, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerCell = inputCount + previousOutputCount + 1; if (weightId < weightsPerCell * previousOutputCount) { int cellId = weightId / weightsPerCell; if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, cellInputWeights, cellInputWeightDeltas, cellInputWeightMeanSquares, weightId, cellStateErrors[cellId] * cellWeightsRTRLPartials[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, cellInputWeights, cellInputWeightDeltas, weightId, cellStateErrors[cellId] * cellWeightsRTRLPartials[weightId]); } } } }
e3cc5d63d125d5a829f0128594dbd0916f3c7155.cu
//Includes for IntelliSense #define _SIZE_T_DEFINED #include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <math.h> #include "../NeuralNetwork/Activation/ActivationFunction.cu" extern "C" { typedef enum MyBackPropMethod { SGD = 0, RMSProp = 1, } MyBackPropMethod; __device__ float Clip(float value, float clip) { return (clip == 0) * value + (clip != 0) * ((value > clip) * clip + (value < -clip) * -clip + (value >= -clip && value <= clip) * value); /* avoids thread divergence, equivalent to: if (clip == 0) return value; else if (value > clip) return clip; else if (value < -clip) return -clip; else return value; */ } __device__ void SGDWeightUpdate(float trainingRate, float momentum, float clipGradient, float *weights, float *weightDeltas, int weightId, float gradient) { float weightDelta = trainingRate * Clip(gradient, clipGradient) + momentum * weightDeltas[weightId]; weightDeltas[weightId] = weightDelta; weights[weightId] -= weightDelta; } __device__ void RMSPropWeightUpdate(float trainingRate, float momentum, float smoothingFactor, float clipGradient, float *weights, float *weightDeltas, float *weightMeanSquares, int weightId, float gradient) { float rmsGradient = Clip(gradient, clipGradient) + momentum * weightDeltas[weightId]; weightDeltas[weightId] = rmsGradient; float weightMeanSquare = smoothingFactor * weightMeanSquares[weightId] + (1.0f - smoothingFactor) * rmsGradient * rmsGradient; if (weightMeanSquare != 0) rmsGradient /= sqrtf(weightMeanSquare); weightMeanSquares[weightId] = weightMeanSquare; weights[weightId] -= trainingRate * rmsGradient; } __global__ void LSTMUpdateGateWeightsKernelBPTT( float *inputGateWeights, float *inputGateWeightDeltas, float *inputGateWeightMeanSquares, float *forgetGateWeights, float *forgetGateWeightDeltas, float *forgetGateWeightMeanSquares, float *outputGateWeights, float *outputGateWeightDeltas, float *outputGateWeightMeanSquares, float* outputGateWeightGradient, float* inputGateWeightGradient, float* forgetGateWeightGradient, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerGate = inputCount + previousOutputCount + cellsPerBlock + 1; if (weightId < weightsPerGate * previousOutputCount / cellsPerBlock) { if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, outputGateWeights, outputGateWeightDeltas, outputGateWeightMeanSquares, weightId, outputGateWeightGradient[weightId]); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, inputGateWeights, inputGateWeightDeltas, inputGateWeightMeanSquares, weightId, inputGateWeightGradient[weightId]); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, forgetGateWeights, forgetGateWeightDeltas, forgetGateWeightMeanSquares, weightId, forgetGateWeightGradient[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, outputGateWeights, outputGateWeightDeltas, weightId, outputGateWeightGradient[weightId]); SGDWeightUpdate(trainingRate, momentum, clipGradient, inputGateWeights, inputGateWeightDeltas, weightId, inputGateWeightGradient[weightId]); SGDWeightUpdate(trainingRate, 
momentum, clipGradient, forgetGateWeights, forgetGateWeightDeltas, weightId, forgetGateWeightGradient[weightId]); } } } __global__ void LSTMUpdateCellWeightsKernelBPTT( float *cellInputWeights, float *cellInputWeightDeltas, float *cellInputWeightMeanSquares, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, float *cellInputWeightGradient, int inputCount, int previousOutputCount ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerCell = inputCount + previousOutputCount + 1; int cellStatesCount = previousOutputCount; if (weightId < weightsPerCell * cellStatesCount) { int cellId = weightId / weightsPerCell; if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, cellInputWeights, cellInputWeightDeltas, cellInputWeightMeanSquares, weightId, cellInputWeightGradient[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, cellInputWeights, cellInputWeightDeltas, weightId, cellInputWeightGradient[weightId]); } } } /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /* /* ORIGINAL FROM KAREL */ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ __global__ void LSTMUpdateGateWeightsKernel( float *input, float *previousOutput, float *cellStates, float *cellStateErrors, float *outputGateDeltas, float *inputGateWeights, float *inputGateWeightDeltas, float *inputGateWeightMeanSquares, float *forgetGateWeights, float *forgetGateWeightDeltas, float *forgetGateWeightMeanSquares, float *outputGateWeights, float *outputGateWeightDeltas, float *outputGateWeightMeanSquares, float *inputGateWeightsRTRLPartials, float *forgetGateWeightsRTRLPartials, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerGate = inputCount + previousOutputCount + cellsPerBlock + 1; if (weightId < weightsPerGate * previousOutputCount / cellsPerBlock) { int fromId = weightId % weightsPerGate; int toId = weightId / weightsPerGate; //calculate output gate weight gradient int isFromInputUnit = fromId >= 0 && fromId < inputCount; int isFromPreviousOutputUnit = 
(fromId >= inputCount) && (fromId < inputCount + previousOutputCount); int isPeephole = (fromId >= inputCount + previousOutputCount) && (fromId < inputCount + previousOutputCount + cellsPerBlock); int isFromBiasUnit = fromId == (inputCount + previousOutputCount + cellsPerBlock); float inputFromWeight = isFromInputUnit * input[isFromInputUnit * fromId] + isFromPreviousOutputUnit * previousOutput[isFromPreviousOutputUnit * (fromId - inputCount)] + isPeephole * cellStates[isPeephole * (toId * cellsPerBlock + (fromId - inputCount - previousOutputCount))] + isFromBiasUnit * 1; float outputGateWeightGradient = outputGateDeltas[toId] * inputFromWeight; //calculate input and forget gate weight gradients float inputGateWeightGradient = 0; float forgetGateWeightGradient = 0; //loop through cells for (int cellId = toId * cellsPerBlock; cellId < (toId + 1) * cellsPerBlock; cellId++) { inputGateWeightGradient += cellStateErrors[cellId] * inputGateWeightsRTRLPartials[cellId * weightsPerGate + fromId]; forgetGateWeightGradient += cellStateErrors[cellId] * forgetGateWeightsRTRLPartials[cellId * weightsPerGate + fromId]; } //update gate weights if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, outputGateWeights, outputGateWeightDeltas, outputGateWeightMeanSquares, weightId, outputGateWeightGradient); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, inputGateWeights, inputGateWeightDeltas, inputGateWeightMeanSquares, weightId, inputGateWeightGradient); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, forgetGateWeights, forgetGateWeightDeltas, forgetGateWeightMeanSquares, weightId, forgetGateWeightGradient); } else // SGD { SGDWeightUpdate(trainingRate, momentum, clipGradient, outputGateWeights, outputGateWeightDeltas, weightId, outputGateWeightGradient); SGDWeightUpdate(trainingRate, momentum, clipGradient, inputGateWeights, inputGateWeightDeltas, weightId, inputGateWeightGradient); SGDWeightUpdate(trainingRate, momentum, clipGradient, forgetGateWeights, forgetGateWeightDeltas, weightId, forgetGateWeightGradient); } } } __global__ void LSTMUpdateCellWeightsKernel( float *input, float *previousOutput, float *cellStateErrors, float *cellInputWeights, float *cellInputWeightDeltas, float *cellInputWeightMeanSquares, float *cellWeightsRTRLPartials, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerCell = inputCount + previousOutputCount + 1; if (weightId < weightsPerCell * previousOutputCount) { int cellId = weightId / weightsPerCell; if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, cellInputWeights, cellInputWeightDeltas, cellInputWeightMeanSquares, weightId, cellStateErrors[cellId] * cellWeightsRTRLPartials[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, cellInputWeights, cellInputWeightDeltas, weightId, cellStateErrors[cellId] * cellWeightsRTRLPartials[weightId]); } } } }
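Both update kernels funnel each gradient through Clip() and then either SGDWeightUpdate() or RMSPropWeightUpdate(). A host-side reference of the same arithmetic is handy for unit-testing the kernels; the sketch below mirrors the device code step for step (the struct and function names are illustrative and not part of the original files):

#include <cmath>

// Per-weight optimizer state, mirroring weightDeltas[] and weightMeanSquares[].
struct RmsState { float delta = 0.0f; float meanSquare = 0.0f; };

// Branching host equivalent of the branch-free device Clip().
inline float clipHost(float v, float c)
{
    if (c == 0.0f) return v;
    return v > c ? c : (v < -c ? -c : v);
}

// Same rule as RMSPropWeightUpdate(): momentum on the clipped gradient, an
// exponential moving average of the squared gradient, then a normalized step.
inline void rmsPropStepHost(float lr, float momentum, float smoothing, float clip,
                            float& weight, RmsState& s, float gradient)
{
    float g = clipHost(gradient, clip) + momentum * s.delta;
    s.delta = g;
    s.meanSquare = smoothing * s.meanSquare + (1.0f - smoothing) * g * g;
    if (s.meanSquare != 0.0f) g /= std::sqrt(s.meanSquare);
    weight -= lr * g;
}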
7bd10387f10790797251769e623a4cb3ce2d3c44.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/cub_reduce.h" #include "paddle/fluid/operators/reduce_sum_op.h" namespace paddle { namespace operators { template <typename T> struct IdentityFunctor { HOSTDEVICE explicit inline IdentityFunctor() {} HOSTDEVICE inline T operator()(const T& x) const { return x; } }; template <typename T> class ReduceSumKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { bool reduce_all = context.Attr<bool>("reduce_all"); auto* input = context.Input<Tensor>("X"); auto* output = context.Output<Tensor>("Out"); auto dims = context.Attr<std::vector<int>>("dim"); bool keep_dim = context.Attr<bool>("keep_dim"); std::vector<int> reduce_dims; if (reduce_all) { reduce_dims.resize(input->dims().size()); for (int i = 0; i < reduce_dims.size(); ++i) reduce_dims[i] = i; } else { for (auto e : dims) { reduce_dims.push_back(e >= 0 ? e : e + input->dims().size()); } } int reduce_num = 1; for (int i = 0; i < reduce_dims.size(); ++i) { reduce_num *= input->dims()[reduce_dims[i]]; } auto stream = context.cuda_device_context().stream(); TensorReduce<T, T, hipcub::Sum, IdentityFunctor<T>>( *input, output, reduce_dims, static_cast<T>(0), hipcub::Sum(), IdentityFunctor<T>(), stream); } }; } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL(reduce_sum, ops::ReduceSumKernel<float>, ops::ReduceSumKernel<double>, ops::ReduceSumKernel<int>, ops::ReduceSumKernel<int64_t>);
7bd10387f10790797251769e623a4cb3ce2d3c44.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/cub_reduce.h" #include "paddle/fluid/operators/reduce_sum_op.h" namespace paddle { namespace operators { template <typename T> struct IdentityFunctor { HOSTDEVICE explicit inline IdentityFunctor() {} HOSTDEVICE inline T operator()(const T& x) const { return x; } }; template <typename T> class ReduceSumKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { bool reduce_all = context.Attr<bool>("reduce_all"); auto* input = context.Input<Tensor>("X"); auto* output = context.Output<Tensor>("Out"); auto dims = context.Attr<std::vector<int>>("dim"); bool keep_dim = context.Attr<bool>("keep_dim"); std::vector<int> reduce_dims; if (reduce_all) { reduce_dims.resize(input->dims().size()); for (int i = 0; i < reduce_dims.size(); ++i) reduce_dims[i] = i; } else { for (auto e : dims) { reduce_dims.push_back(e >= 0 ? e : e + input->dims().size()); } } int reduce_num = 1; for (int i = 0; i < reduce_dims.size(); ++i) { reduce_num *= input->dims()[reduce_dims[i]]; } auto stream = context.cuda_device_context().stream(); TensorReduce<T, T, cub::Sum, IdentityFunctor<T>>( *input, output, reduce_dims, static_cast<T>(0), cub::Sum(), IdentityFunctor<T>(), stream); } }; } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL(reduce_sum, ops::ReduceSumKernel<float>, ops::ReduceSumKernel<double>, ops::ReduceSumKernel<int>, ops::ReduceSumKernel<int64_t>);
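The only backend-specific symbols in this pair are hipcub::Sum versus cub::Sum passed to TensorReduce. As a standalone illustration of the same reduction primitive, a sketch rather than Paddle's TensorReduce, a full-tensor sum can also be written directly with cub::DeviceReduce:

#include <cub/cub.cuh>
#include <cuda_runtime.h>

// Sum n floats on the device into *d_out (sketch; error checking omitted).
void sumAll(const float* d_in, float* d_out, int n, cudaStream_t stream)
{
    void*  d_temp     = nullptr;
    size_t temp_bytes = 0;
    // First call only computes the required temporary-storage size.
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n, stream);
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n, stream);
    cudaFree(d_temp);
}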
b7255f898751759c97760a9c3b4af95a78e58382.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"Ocean_kernel.cuh" int cuda_iDivUp(int a, int b) { return (a+(b-1)) / b; } // complex maths functions __device__ float2 conjugate(float2 arg) { return make_float2(arg.x,arg.y); } __device__ float2 complex_exp(float arg) { return make_float2(cosf(arg), sinf(arg)); } __device__ float2 complex_add(float2 a, float2 b) { return make_float2(a.x+b.x , a.y+b.y); } __device__ float2 complex_mult(float2 ab, float2 cd) { return make_float2( (ab.x * cd.x) - (ab.y*cd.y),(ab.x * cd.y) +(ab.y * cd.x)); } /* generate wave heightfield at time t based on inital heightfiled and dispression relationship */ __global__ void generateSpectrumKernel(float2 *h0, float2 *ht, unsigned int in_width, unsigned int out_width, unsigned int out_height, float t, float patchSize) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int in_index = y*in_width + x; unsigned int in_mIndex = (out_height -y)*in_width + (out_width-x);// mirrored index unsigned int out_index = y*out_width + x; // calculate wave vector float2 k; k.x = (-(int)out_width / 2.0f + x) * (2.0f * M_PI / patchSize); k.y = (-(int)out_width / 2.0f + y) * (2.0f * M_PI / patchSize); // calculate dispersion w(k) float k_len = sqrtf(k.x*k.x + k.y*k.y); float w = sqrtf(9.81f * k_len); if ((x < out_width)&&(y < out_width)) { float2 h0_k = h0[in_index]; float2 h0_mk = h0[in_mIndex]; // output frequency-sdapce complex values ht[out_index] = complex_add(complex_mult(h0_k,complex_exp(w*t)),complex_mult(conjugate(h0_mk),complex_exp(-w*t))); } } // update height map values based on output of fft __global__ void updateHeightMapKernel(float *heightMap, float2 *ht, unsigned int width) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = y*width + x; float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f; heightMap[i] = ht[i].x * sign_correction; } __global__ void updateHeightMapKernel_y(float *heightMap, float2 *ht, unsigned int width) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = y*width + x; float sign_correction = ((x + y) & 0x01) ? 
-1.0f : 1.0f; heightMap[i] = ht[i].y * sign_correction; } // generate slope by partial differences in spatial domain __global__ void calculateSlopeKernel(float *h,float2 *slopeOut,unsigned int width,unsigned int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = y*width + x; float2 slope = make_float2(0.0f,0.0f); if ((x > 0) && (y > 0) && (x < width - 1) && (y < height - 1)) { slope.x = h[i + 1] - h[i - 1]; slope.y = h[i + width] - h[i - width]; } slopeOut[i] = slope; } void cuda_GenerateSpectrumKernel(float2 *d_h0, float2 *d_ht, unsigned int in_width, unsigned int out_width, unsigned int out_height, float animeTime, float patchSize) { dim3 block(32,32,1); dim3 grid(cuda_iDivUp(out_width,block.x), cuda_iDivUp(out_height, block.y),1); hipLaunchKernelGGL(( generateSpectrumKernel) , dim3(grid), dim3(block) , 0, 0, d_h0,d_ht,in_width,out_width,out_height,animeTime,patchSize); } void cuda_UpdateHeightMapKernel(float *d_heightMap, float2 *d_ht, unsigned int width, unsigned int height, bool autoTest) { dim3 block(32, 32, 1); dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1); if (autoTest) { hipLaunchKernelGGL(( updateHeightMapKernel_y), dim3(grid), dim3(block), 0, 0, d_heightMap,d_ht,width); } else { hipLaunchKernelGGL(( updateHeightMapKernel), dim3(grid),dim3(block), 0, 0, d_heightMap,d_ht,width); } } void cuda_CalculateSlopKernel(float * hptr, float2 * slopeOut, unsigned int width, unsigned int height) { dim3 block(32, 32, 1); dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1); hipLaunchKernelGGL(( calculateSlopeKernel), dim3(grid),dim3(block), 0, 0, hptr,slopeOut,width,height); }
b7255f898751759c97760a9c3b4af95a78e58382.cu
#include"Ocean_kernel.cuh" int cuda_iDivUp(int a, int b) { return (a+(b-1)) / b; } // complex maths functions __device__ float2 conjugate(float2 arg) { return make_float2(arg.x,arg.y); } __device__ float2 complex_exp(float arg) { return make_float2(cosf(arg), sinf(arg)); } __device__ float2 complex_add(float2 a, float2 b) { return make_float2(a.x+b.x , a.y+b.y); } __device__ float2 complex_mult(float2 ab, float2 cd) { return make_float2( (ab.x * cd.x) - (ab.y*cd.y),(ab.x * cd.y) +(ab.y * cd.x)); } /* generate wave heightfield at time t based on inital heightfiled and dispression relationship */ __global__ void generateSpectrumKernel(float2 *h0, float2 *ht, unsigned int in_width, unsigned int out_width, unsigned int out_height, float t, float patchSize) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int in_index = y*in_width + x; unsigned int in_mIndex = (out_height -y)*in_width + (out_width-x);// mirrored index unsigned int out_index = y*out_width + x; // calculate wave vector float2 k; k.x = (-(int)out_width / 2.0f + x) * (2.0f * M_PI / patchSize); k.y = (-(int)out_width / 2.0f + y) * (2.0f * M_PI / patchSize); // calculate dispersion w(k) float k_len = sqrtf(k.x*k.x + k.y*k.y); float w = sqrtf(9.81f * k_len); if ((x < out_width)&&(y < out_width)) { float2 h0_k = h0[in_index]; float2 h0_mk = h0[in_mIndex]; // output frequency-sdapce complex values ht[out_index] = complex_add(complex_mult(h0_k,complex_exp(w*t)),complex_mult(conjugate(h0_mk),complex_exp(-w*t))); } } // update height map values based on output of fft __global__ void updateHeightMapKernel(float *heightMap, float2 *ht, unsigned int width) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = y*width + x; float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f; heightMap[i] = ht[i].x * sign_correction; } __global__ void updateHeightMapKernel_y(float *heightMap, float2 *ht, unsigned int width) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = y*width + x; float sign_correction = ((x + y) & 0x01) ? 
-1.0f : 1.0f; heightMap[i] = ht[i].y * sign_correction; } // generate slope by partial differences in spatial domain __global__ void calculateSlopeKernel(float *h,float2 *slopeOut,unsigned int width,unsigned int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = y*width + x; float2 slope = make_float2(0.0f,0.0f); if ((x > 0) && (y > 0) && (x < width - 1) && (y < height - 1)) { slope.x = h[i + 1] - h[i - 1]; slope.y = h[i + width] - h[i - width]; } slopeOut[i] = slope; } void cuda_GenerateSpectrumKernel(float2 *d_h0, float2 *d_ht, unsigned int in_width, unsigned int out_width, unsigned int out_height, float animeTime, float patchSize) { dim3 block(32,32,1); dim3 grid(cuda_iDivUp(out_width,block.x), cuda_iDivUp(out_height, block.y),1); generateSpectrumKernel <<<grid, block >>>(d_h0,d_ht,in_width,out_width,out_height,animeTime,patchSize); } void cuda_UpdateHeightMapKernel(float *d_heightMap, float2 *d_ht, unsigned int width, unsigned int height, bool autoTest) { dim3 block(32, 32, 1); dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1); if (autoTest) { updateHeightMapKernel_y<<<grid, block>>>(d_heightMap,d_ht,width); } else { updateHeightMapKernel<<<grid,block>>>(d_heightMap,d_ht,width); } } void cuda_CalculateSlopKernel(float * hptr, float2 * slopeOut, unsigned int width, unsigned int height) { dim3 block(32, 32, 1); dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1); calculateSlopeKernel<<<grid,block>>>(hptr,slopeOut,width,height); }
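hipify left Ocean_kernel.cu unchanged apart from the host-side launch statements: hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...) corresponds to kernel<<<grid, block, sharedMemBytes, stream>>>(args...), with both extra arguments zero here. One incidental note: conjugate() above returns make_float2(arg.x, arg.y), while the mathematical complex conjugate negates the imaginary part. A corrected helper, shown only for reference and not a change to the files, would be:

// Complex conjugate with the imaginary part negated.
__device__ float2 complex_conjugate(float2 arg) { return make_float2(arg.x, -arg.y); }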
TensorTopK.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/LegacyTHFunctionsCUDA.h> #include <ATen/native/Resize.h> #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/native/hip/SortingRadixSelect.cuh> #include <ATen/native/hip/SortUtils.cuh> #include <c10/macros/Macros.h> using namespace at::native; namespace at { namespace native { namespace { template <typename T, typename IndexType, int Dim, bool Order> C10_LAUNCH_BOUNDS_1(512) __global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input, IndexType inputSliceSize, IndexType outputSliceSize, // aka `k` IndexType numInputSlices, IndexType inputWithinSliceStride, at::cuda::detail::TensorInfo<T, IndexType> topK, IndexType numTopKSlices, IndexType topKWithinSliceStride, at::cuda::detail::TensorInfo<int64_t, IndexType> indices, IndexType indicesWithinSliceStride) { // Indices are limited to integer fp precision, so counts can fit in // int32, regardless of IndexType #ifdef __HIP_PLATFORM_HCC__ __shared__ int smem[64]; #else __shared__ int smem[32]; // one per each warp, up to warp limit #endif IndexType slice = getLinearBlockId<IndexType>(); if (slice >= numInputSlices) { return; } // Find the start offset for our slice IndexType sliceStartIndex = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input); IndexType topKSliceStartIndex = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK); IndexType indicesSliceStartIndex = at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices); T* inputSliceStart = &input.data[sliceStartIndex]; T* topKSliceStart = &topK.data[topKSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; // Find the k-th highest element in our input T topKValue = ScalarConvert<int, T>::to(0); radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType, Order>( inputSliceStart, outputSliceSize, inputSliceSize, inputWithinSliceStride, smem, &topKValue); const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue); // Every value that is strictly less/greater than `pattern` // (depending on sort dir) in sorted int format is in the top-K. // The top-K value itself might not be unique. // // Since there are a variable number of elements that we see that // are within the top-k, we don't know at what index to write out // the resulting values. // In order to get this, we perform an exclusive prefix sum of // `hasTopK`. This will return the resulting index into which we // need to write the result, if a thread has a result. // All threads need to participate in the loop and the prefix sum, // but not necessarily in the load; hence loop bounds being rounded // up to a multiple of the block dim. IndexType numIterations = THCRoundUp(inputSliceSize, (IndexType) blockDim.x); IndexType writeIndexStart = 0; for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < inputSliceSize); T v = inRange ? 
doLdg(&inputSliceStart[i * inputWithinSliceStride]) : ScalarConvert<int, T>::to(0); const auto convertedV = at::native::TopKTypeConfig<T>::convert(v); bool hasTopK; if (Order) { hasTopK = inRange && (convertedV > topKConverted); } else { hasTopK = inRange && (convertedV < topKConverted); } int index; int carry; exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>()); if (hasTopK) { int writeIndex = writeIndexStart + index; CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize); IndexType topKOffset = writeIndex * topKWithinSliceStride; IndexType indexOffset = writeIndex * indicesWithinSliceStride; topKSliceStart[topKOffset] = v; indicesSliceStart[indexOffset] = i; } writeIndexStart += carry; } // We need to fill in the rest with actual == top-K values. // The number that we need is outputSliceSize - // writeIndexStart. There might be more than that number available, // in which case we have to choose the first seen set. We do this // via a prefix sum to calculate indices for writing results. CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart); IndexType topKRemaining = (outputSliceSize - writeIndexStart); for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < inputSliceSize); T v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : ScalarConvert<int, T>::to(0); const auto convertedV = at::native::TopKTypeConfig<T>::convert(v); bool hasTopK = inRange && (convertedV == topKConverted); int index; int carry; exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>()); if (hasTopK && index < topKRemaining) { int writeIndex = writeIndexStart + index; CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize); IndexType topKOffset = writeIndex * topKWithinSliceStride; IndexType indexOffset = writeIndex * indicesWithinSliceStride; topKSliceStart[topKOffset] = v; indicesSliceStart[indexOffset] = i; } if (carry >= topKRemaining) { break; } topKRemaining -= carry; writeIndexStart += carry; } }; } // namespace TORCH_IMPL_FUNC(topk_out_cuda) (const Tensor& self, int64_t k, int64_t dim, bool largest, bool sorted, const Tensor& values, const Tensor& indices) { TensorArg topK_arg{values, "topK", 1}, indices_arg{indices, "indices", 2}, input_arg{self, "self", 3}; checkAllSameGPU(__func__, {topK_arg, indices_arg, input_arg}); dim = at::maybe_wrap_dim(dim, self); int numDims = self.dim(); numDims = numDims == 0 ? 1 : numDims; TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions"); int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim); Tensor input = self.contiguous(); // If k is 0 the result is an empty tensor, so we don't need to launch a kernel. if (k == 0) { return; } // static_cast is required to ensure that the correct type (INDEX_T) // is provided to the kernel for the arguments. 
#define RUN_K(INDEX_T, DIM, DIR) \ hipLaunchKernelGGL(( gatherTopK<scalar_t, INDEX_T, DIM, DIR>) \ , dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ inputInfo, \ static_cast<INDEX_T>(sliceSize), \ static_cast<INDEX_T>(k), \ static_cast<INDEX_T>(inputSlices), \ /* The actual dimension that the k-selection is running in */ \ /* may have changed from collapseDims() */ \ static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \ topKInfo, \ static_cast<INDEX_T>(topKSlices), \ static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \ indicesInfo, \ static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); \ C10_HIP_KERNEL_LAUNCH_CHECK(); #define RUN_DIR(INDEX_T, DIM) \ if (largest) { \ RUN_K(INDEX_T, DIM, true); \ } else { \ RUN_K(INDEX_T, DIM, false); \ } #define RUN_DIM(INDEX_T) \ if (allDims == 1) { \ RUN_DIR(INDEX_T, 1); \ } else if (allDims == 2) { \ RUN_DIR(INDEX_T, 2); \ } else if (allDims == 3) { \ RUN_DIR(INDEX_T, 3); \ } else { \ RUN_DIR(INDEX_T, -1); \ } #define RUN_T(INDEX_T) \ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \ at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \ at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \ at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \ at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \ at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \ at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \ /* tensorInfoLegacyIfScalar*/ \ if (!input.dim()) { \ inputInfo.dims = 1; \ inputInfo.sizes[0] = 1; \ inputInfo.strides[0] = 1; \ topKInfo.dims = 1; \ topKInfo.sizes[0] = 1; \ topKInfo.strides[0] = 1; \ indicesInfo.dims = 1; \ indicesInfo.sizes[0] = 1; \ indicesInfo.strides[0] = 1; \ } \ /* We use these structures solely to find the offset to */ \ /* each slice we are operating on */ \ inputInfo.sizes[dim] = 1; \ topKInfo.sizes[dim] = 1; \ indicesInfo.sizes[dim] = 1; \ /* Collapse all other dims */ \ int collapseInputDim = inputInfo.collapseDims(dim); \ int collapseTopKDim = topKInfo.collapseDims(dim); \ int collapseIndicesDim = indicesInfo.collapseDims(dim); \ int64_t inputSlices = 1; \ for (int i = 0; i < inputInfo.dims; ++i) { \ inputSlices *= inputInfo.sizes[i]; \ } \ int64_t topKSlices = 1; \ for (int i = 0; i < topKInfo.dims; ++i) { \ topKSlices *= topKInfo.sizes[i]; \ } \ \ dim3 grid; \ TORCH_INTERNAL_ASSERT(getGridFromTiles(inputSlices, grid), "Too many slices to sort"); \ \ dim3 block(::min(at::cuda::ATenCeilDiv(sliceSize, (int64_t) C10_WARP_SIZE)*(int64_t) C10_WARP_SIZE, (int64_t) 512)); \ \ /* This is used as a template parameter to calculate indices. */ \ /* We only specialize it if all collapsed dim sizes are the */ \ /* same; otherwise, we use -1 which is the specialization */ \ /* parameter for arbitrary dimensions */ \ int allDims = inputInfo.dims; \ if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \ allDims = -1; \ } \ \ RUN_DIM(INDEX_T); \ }); // the below is safe with 0-dimensional tensors because it is based on // TensorInfo which implicitly expands to 1-dimensional. 
if (input.numel() > 0) { // Based on required index size, run the algorithm with the // appropriate index type if (at::cuda::detail::canUse32BitIndexMath(input) && at::cuda::detail::canUse32BitIndexMath(values) && at::cuda::detail::canUse32BitIndexMath(indices)) { RUN_T(uint32_t); } else { RUN_T(uint64_t); } } #undef RUN_T #undef RUN_DIM #undef RUN_DIR #undef RUN_K // Sort the results if the user wants them sorted, since our // selection routine does not ensure sorting if (sorted && values.numel() > 1) { if (should_use_small_sort(values, dim)) { // This avoids any memory allocations and performs all sorting // work inplace along the slice sortKeyValueInplace(values, indices, dim, largest); } else { // Depend upon the backup sort that returns indices, which we // can use in conjunction with gather to produce the original // indices. // This is not the most efficient implementation, especially since // there are memory allocations performed here. If the user desires // greater performance, they should torch.gather() the results // themselves using the reported indices, providing previously // allocated tensors to receive the results. Tensor sortedIndices = at::empty_like(indices); Tensor sortedValues = at::empty_like(values); sort_out_cuda(values, dim, largest, sortedValues, sortedIndices); indices.copy_(indices.gather(dim, sortedIndices)); values.copy_(sortedValues); } } } } // at::native } // at
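TensorTopK.hip above is the hipify-generated counterpart of TensorTopK.cu below. The kernel body and the dispatch macros are identical; what changes is the launch machinery: the triple-chevron launch inside RUN_K becomes hipLaunchKernelGGL, the stream is obtained from c10::hip::getCurrentHIPStreamMasqueradingAsCUDA() instead of c10::cuda::getCurrentCUDAStream(), the launch check becomes C10_HIP_KERNEL_LAUNCH_CHECK, and the per-warp scratch grows from smem[32] to smem[64] under __HIP_PLATFORM_HCC__ because AMD wavefronts are 64 lanes wide. A minimal, self-contained sketch of the same launch-syntax mapping follows; the fill_value kernel is hypothetical and not part of either file:

#include <hip/hip_runtime.h>

// Hypothetical kernel used only to illustrate the CUDA -> HIP launch rewrite.
__global__ void fill_value(float* out, int n, float value)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

void launch_fill_value(float* d_out, int n, float value, hipStream_t stream)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form:             fill_value<<<grid, block, 0, stream>>>(d_out, n, value);
    // HIP form after hipify:
    hipLaunchKernelGGL(fill_value, grid, block, 0 /*dynamic shared memory*/, stream,
                       d_out, n, value);
}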
TensorTopK.cu
#include <ATen/ATen.h> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/LegacyTHFunctionsCUDA.h> #include <ATen/native/Resize.h> #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/native/cuda/SortingRadixSelect.cuh> #include <ATen/native/cuda/SortUtils.cuh> #include <c10/macros/Macros.h> using namespace at::native; namespace at { namespace native { namespace { template <typename T, typename IndexType, int Dim, bool Order> C10_LAUNCH_BOUNDS_1(512) __global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input, IndexType inputSliceSize, IndexType outputSliceSize, // aka `k` IndexType numInputSlices, IndexType inputWithinSliceStride, at::cuda::detail::TensorInfo<T, IndexType> topK, IndexType numTopKSlices, IndexType topKWithinSliceStride, at::cuda::detail::TensorInfo<int64_t, IndexType> indices, IndexType indicesWithinSliceStride) { // Indices are limited to integer fp precision, so counts can fit in // int32, regardless of IndexType #ifdef __HIP_PLATFORM_HCC__ __shared__ int smem[64]; #else __shared__ int smem[32]; // one per each warp, up to warp limit #endif IndexType slice = getLinearBlockId<IndexType>(); if (slice >= numInputSlices) { return; } // Find the start offset for our slice IndexType sliceStartIndex = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input); IndexType topKSliceStartIndex = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK); IndexType indicesSliceStartIndex = at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices); T* inputSliceStart = &input.data[sliceStartIndex]; T* topKSliceStart = &topK.data[topKSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; // Find the k-th highest element in our input T topKValue = ScalarConvert<int, T>::to(0); radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType, Order>( inputSliceStart, outputSliceSize, inputSliceSize, inputWithinSliceStride, smem, &topKValue); const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue); // Every value that is strictly less/greater than `pattern` // (depending on sort dir) in sorted int format is in the top-K. // The top-K value itself might not be unique. // // Since there are a variable number of elements that we see that // are within the top-k, we don't know at what index to write out // the resulting values. // In order to get this, we perform an exclusive prefix sum of // `hasTopK`. This will return the resulting index into which we // need to write the result, if a thread has a result. // All threads need to participate in the loop and the prefix sum, // but not necessarily in the load; hence loop bounds being rounded // up to a multiple of the block dim. IndexType numIterations = THCRoundUp(inputSliceSize, (IndexType) blockDim.x); IndexType writeIndexStart = 0; for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < inputSliceSize); T v = inRange ? 
doLdg(&inputSliceStart[i * inputWithinSliceStride]) : ScalarConvert<int, T>::to(0); const auto convertedV = at::native::TopKTypeConfig<T>::convert(v); bool hasTopK; if (Order) { hasTopK = inRange && (convertedV > topKConverted); } else { hasTopK = inRange && (convertedV < topKConverted); } int index; int carry; exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>()); if (hasTopK) { int writeIndex = writeIndexStart + index; CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize); IndexType topKOffset = writeIndex * topKWithinSliceStride; IndexType indexOffset = writeIndex * indicesWithinSliceStride; topKSliceStart[topKOffset] = v; indicesSliceStart[indexOffset] = i; } writeIndexStart += carry; } // We need to fill in the rest with actual == top-K values. // The number that we need is outputSliceSize - // writeIndexStart. There might be more than that number available, // in which case we have to choose the first seen set. We do this // via a prefix sum to calculate indices for writing results. CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart); IndexType topKRemaining = (outputSliceSize - writeIndexStart); for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < inputSliceSize); T v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : ScalarConvert<int, T>::to(0); const auto convertedV = at::native::TopKTypeConfig<T>::convert(v); bool hasTopK = inRange && (convertedV == topKConverted); int index; int carry; exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>()); if (hasTopK && index < topKRemaining) { int writeIndex = writeIndexStart + index; CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize); IndexType topKOffset = writeIndex * topKWithinSliceStride; IndexType indexOffset = writeIndex * indicesWithinSliceStride; topKSliceStart[topKOffset] = v; indicesSliceStart[indexOffset] = i; } if (carry >= topKRemaining) { break; } topKRemaining -= carry; writeIndexStart += carry; } }; } // namespace TORCH_IMPL_FUNC(topk_out_cuda) (const Tensor& self, int64_t k, int64_t dim, bool largest, bool sorted, const Tensor& values, const Tensor& indices) { TensorArg topK_arg{values, "topK", 1}, indices_arg{indices, "indices", 2}, input_arg{self, "self", 3}; checkAllSameGPU(__func__, {topK_arg, indices_arg, input_arg}); dim = at::maybe_wrap_dim(dim, self); int numDims = self.dim(); numDims = numDims == 0 ? 1 : numDims; TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions"); int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim); Tensor input = self.contiguous(); // If k is 0 the result is an empty tensor, so we don't need to launch a kernel. if (k == 0) { return; } // static_cast is required to ensure that the correct type (INDEX_T) // is provided to the kernel for the arguments. 
#define RUN_K(INDEX_T, DIM, DIR) \ gatherTopK<scalar_t, INDEX_T, DIM, DIR> \ <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \ inputInfo, \ static_cast<INDEX_T>(sliceSize), \ static_cast<INDEX_T>(k), \ static_cast<INDEX_T>(inputSlices), \ /* The actual dimension that the k-selection is running in */ \ /* may have changed from collapseDims() */ \ static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \ topKInfo, \ static_cast<INDEX_T>(topKSlices), \ static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \ indicesInfo, \ static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); #define RUN_DIR(INDEX_T, DIM) \ if (largest) { \ RUN_K(INDEX_T, DIM, true); \ } else { \ RUN_K(INDEX_T, DIM, false); \ } #define RUN_DIM(INDEX_T) \ if (allDims == 1) { \ RUN_DIR(INDEX_T, 1); \ } else if (allDims == 2) { \ RUN_DIR(INDEX_T, 2); \ } else if (allDims == 3) { \ RUN_DIR(INDEX_T, 3); \ } else { \ RUN_DIR(INDEX_T, -1); \ } #define RUN_T(INDEX_T) \ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \ at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \ at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \ at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \ at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \ at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \ at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \ /* tensorInfoLegacyIfScalar*/ \ if (!input.dim()) { \ inputInfo.dims = 1; \ inputInfo.sizes[0] = 1; \ inputInfo.strides[0] = 1; \ topKInfo.dims = 1; \ topKInfo.sizes[0] = 1; \ topKInfo.strides[0] = 1; \ indicesInfo.dims = 1; \ indicesInfo.sizes[0] = 1; \ indicesInfo.strides[0] = 1; \ } \ /* We use these structures solely to find the offset to */ \ /* each slice we are operating on */ \ inputInfo.sizes[dim] = 1; \ topKInfo.sizes[dim] = 1; \ indicesInfo.sizes[dim] = 1; \ /* Collapse all other dims */ \ int collapseInputDim = inputInfo.collapseDims(dim); \ int collapseTopKDim = topKInfo.collapseDims(dim); \ int collapseIndicesDim = indicesInfo.collapseDims(dim); \ int64_t inputSlices = 1; \ for (int i = 0; i < inputInfo.dims; ++i) { \ inputSlices *= inputInfo.sizes[i]; \ } \ int64_t topKSlices = 1; \ for (int i = 0; i < topKInfo.dims; ++i) { \ topKSlices *= topKInfo.sizes[i]; \ } \ \ dim3 grid; \ TORCH_INTERNAL_ASSERT(getGridFromTiles(inputSlices, grid), "Too many slices to sort"); \ \ dim3 block(std::min(at::cuda::ATenCeilDiv(sliceSize, (int64_t) C10_WARP_SIZE)*(int64_t) C10_WARP_SIZE, (int64_t) 512)); \ \ /* This is used as a template parameter to calculate indices. */ \ /* We only specialize it if all collapsed dim sizes are the */ \ /* same; otherwise, we use -1 which is the specialization */ \ /* parameter for arbitrary dimensions */ \ int allDims = inputInfo.dims; \ if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \ allDims = -1; \ } \ \ RUN_DIM(INDEX_T); \ }); // the below is safe with 0-dimensional tensors because it is based on // TensorInfo which implicitly expands to 1-dimensional. 
if (input.numel() > 0) { // Based on required index size, run the algorithm with the // appropriate index type if (at::cuda::detail::canUse32BitIndexMath(input) && at::cuda::detail::canUse32BitIndexMath(values) && at::cuda::detail::canUse32BitIndexMath(indices)) { RUN_T(uint32_t); } else { RUN_T(uint64_t); } } #undef RUN_T #undef RUN_DIM #undef RUN_DIR #undef RUN_K // Sort the results if the user wants them sorted, since our // selection routine does not ensure sorting if (sorted && values.numel() > 1) { if (should_use_small_sort(values, dim)) { // This avoids any memory allocations and performs all sorting // work inplace along the slice sortKeyValueInplace(values, indices, dim, largest); } else { // Depend upon the backup sort that returns indices, which we // can use in conjunction with gather to produce the original // indices. // This is not the most efficient implementation, especially since // there are memory allocations performed here. If the user desires // greater performance, they should torch.gather() the results // themselves using the reported indices, providing previously // allocated tensors to receive the results. Tensor sortedIndices = at::empty_like(indices); Tensor sortedValues = at::empty_like(values); sort_out_cuda(values, dim, largest, sortedValues, sortedIndices); indices.copy_(indices.gather(dim, sortedIndices)); values.copy_(sortedValues); } } } } // at::native } // at
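A hedged usage sketch of how this top-k path is typically reached from C++: the program below is not part of either file and assumes a standard libtorch build with CUDA support; on a CUDA tensor, torch::topk routes through topk_out_cuda and the gatherTopK kernel shown above.

#include <torch/torch.h>
#include <iostream>

int main()
{
    if (!torch::cuda::is_available()) return 0;  // the kernels above only run on GPU tensors

    torch::Tensor x = torch::randn({4, 1000}, torch::kCUDA);

    // k = 5 along the last dimension, largest values, sorted output.
    auto [values, indices] = torch::topk(x, /*k=*/5, /*dim=*/-1, /*largest=*/true, /*sorted=*/true);

    std::cout << values.sizes() << " " << indices.sizes() << std::endl;  // [4, 5] [4, 5]
    return 0;
}

With sorted=true, the tail of topk_out_cuda either sorts the k-element slices in place via sortKeyValueInplace when should_use_small_sort allows it, or falls back to sort_out_cuda followed by a gather of the original indices, exactly as both files show.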