Dataset columns:
hip_filename: string (length 5 to 84)
hip_content: string (length 79 to 9.69M)
cuda_filename: string (length 4 to 83)
cuda_content: string (length 19 to 9.69M)
9f665386fafd696f1084b62aad08f891ae4956a9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include "option_kernel.h" #include "../cuPrintf.cu" #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <cmath> // number of threads per block int threads_per_block = 256; /*! \brief Used by thrust reduce for squaring * * used by threust reduce for squaring */ template<typename T> struct square { __host__ __device__ T operator()(const T& x) const{ return x*x; } }; /*! \brief A simple function to calculate the CDF at x. * * A simple function to calculate the CDF of normal distribution at x. * Using sqrt(2) = 1.414213562373 */ __device__ float phi(float x) { return 0.5*(1 + erf(x/1.4142136)); } /*! \brief Evaluates the black-Scholes formula for given argument values. * * Evaluates the black-Scholes formula for given argument values. */ __device__ float get_black_scholes_continuation_value_gpu(float x, float time, int height, InputData indata ) { float del_t = indata.expiry_time/(height-1)/365; float t = time*del_t; float d1, d2, den; float ttm = (indata.expiry_time - t)/365; d1 = log(x/indata.strike_price) + ( indata.discount_rate + 0.5*indata.volatility*indata.volatility )*ttm; d2 = log(x/indata.strike_price) + ( indata.discount_rate - 0.5*indata.volatility*indata.volatility )*ttm; den = indata.volatility*sqrtf( ttm ); d1 = d1/den; d2 = d2/den; return indata.strike_price*exp(-1*indata.discount_rate*ttm)*phi(-1*d2) - x*phi(-1*d1); } /*! \brief This kernel function generates asset price paths using normal smaples from CPU. * * This kernel function generates asset price paths for the underlying stock under * risk-neutral assumption. The asset price paths are stored in Device RAM and is used * by the 'find_optimal_exercise_boundary_and_am_cash_flow' kernel. The stock price * paths follow a Brownian motion. Each thread generates and stores one path. At the * end, this kernel also generates the cash-flows for the corresponding european option, * which are later evaluated to find the value of European option at t=0. The required * normally distributed random samples are obtained from 'norm_sample' array, which is * populated in the CPU and copied to GPU. */ static __global__ void generate_asset_price_paths_and_eu_cash_flow(float *S, float *cash_flow, float *option_value, int width, int height, InputData indata, float *norm_sample) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float drift = indata.discount_rate - indata.dividend - 0.5*pow(indata.volatility,2); float del_t = indata.expiry_time/(height-1)/365; float sigma = sqrtf(del_t)*indata.volatility; S[tid*height] = indata.S_0; float temp = indata.S_0; #pragma unroll 10 for (int j = 1; j < height; j++ ) { S[tid*height+j] = temp = temp*exp(drift*del_t + sigma*norm_sample[tid*height+j]); } // Find cash flow and option value for corresponding european options int expiry_index = height-1; float discount_eu = exp(-1*indata.discount_rate*indata.expiry_time/365 ); float cash_temp; cash_flow[tid] = cash_temp = fmaxf(indata.strike_price - S[tid*height+expiry_index], 0.0); //put option_value[tid] = cash_temp*discount_eu; } /*! \brief Initialize states for random number generation on GPU * * Initialize states for random number generation on GPU. Each state is used by * a thread to get a sequence of normally distributed random samples. 
The seed used * for initialaization is the same for all threads and is read from 'input/option.txt'. */ static __global__ void generate_states(float seed, hiprandState_t *state){ int tid = blockDim.x * blockIdx.x + threadIdx.x; hiprand_init(seed, tid, 0, &state[tid]); } /*! \brief Finds the optimal exercise boundary for american option * * This kernel finds the optimal exercise boundary for a given american * option using Black-Scholes as the continuation criteria. The details are * given in the paper'http://arxiv.org/ftp/arxiv/papers/1205/1205.0106.pdf'. */ static __global__ void find_optimal_exercise_boundary_and_am_cash_flow(float *S, float *cash_flow, float *option_value, int width, int height, InputData indata, float *x, float *h, int *optimal_exercise_boundary, float *cash_flow_am) { int tid = blockDim.x * blockIdx.x + threadIdx.x; int expiry_index = height-1; float del_t = indata.expiry_time/(height-1)/365; // discount for merican counterpart float discount = exp(-1*indata.discount_rate*del_t ); float put_value = 0; float xtemp = 0; float htemp = 0; // for all other times when the option can be exercised, we comapre the // value of exercising and continuation value to find optimal exercise boundary optimal_exercise_boundary[tid] = expiry_index; for ( int time = expiry_index-1; time >= 1; time-- ) // move back in time { put_value = fmaxf( indata.strike_price - S[tid*height+time], 0.0); //put xtemp = S[tid*height+time]; cash_flow[tid] = put_value; htemp = get_black_scholes_continuation_value_gpu(xtemp, time, height, indata); if ( cash_flow[tid] > htemp ) { optimal_exercise_boundary[tid] = time; cash_flow_am[tid] = fmaxf(indata.strike_price - S[tid*height+time], 0.0); } } cash_flow_am[tid] = fmaxf(indata.strike_price - S[tid*height+optimal_exercise_boundary[tid]], 0.0); discount = exp(-1*indata.discount_rate*optimal_exercise_boundary[tid]*del_t ); option_value[tid] = cash_flow_am[tid]*discount;//*/ } /*! \brief This kernel function generates asset price paths using normal smaples from "hiprand/hiprand.h". * * This kernel function generates asset price paths for the underlying stock under * risk-neutral assumption. The asset price paths are stored in Device RAM and is used * by the 'find_optimal_exercise_boundary_and_am_cash_flow' kernel. The stock price * paths follow a Brownian motion. Each thread generates and stores mutiple paths. The * number of paths generated by each thread is decided by parameter 'num_paths_per_thread' * which is read from file 'input/options.txt'. At the end, this kernel also generates the * cash-flows for the corresponding european option, which are later evaluated to find the * value of European option at t=0. Each thread uses a 'hiprandState_t' to obtain normally * distributed random samples. 
*/ static __global__ void mp_generate_asset_price_paths_and_eu_cash_flow(float *S, float *cash_flow, float *option_value, int width, int height, InputData indata, hiprandState_t *states) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float drift = indata.discount_rate - indata.dividend - 0.5*pow(indata.volatility,2); float del_t = indata.expiry_time/(height-1)/365; float sigma = sqrtf(del_t)*indata.volatility; float S_0 = indata.S_0; float temp; hiprandState_t localState = states[tid]; int m_limit = indata.num_paths_per_thread; #pragma unroll 4 for ( int m = 0; m < m_limit; m++) { S[( m_limit*tid +m )*height] = S_0; temp = S_0; #pragma unroll 10 for (int j = 1; j < height; j++ ) { S[( m_limit*tid +m )*height+j] = temp = temp *exp(drift*del_t + sigma*hiprand_normal(&localState)); } } int expiry_index = height-1; // Find cash flow and option value for corresponding european options float discount_eu = exp(-1*indata.discount_rate*indata.expiry_time/365 ); float cash_temp; #pragma unroll 4 for ( int m = 0; m < m_limit; m++) { cash_flow[(m_limit*tid+m)] = cash_temp = fmaxf(indata.strike_price - S[(m_limit*tid + m)*height+expiry_index], 0.0); //put option_value[(m_limit*tid+m)] = cash_temp*discount_eu; } } /*! \brief This kernel function finds both european and american option values. * * This kernel function finds both european and american option values for * a given underlying asset. The required normally distributed random samples * for generating asset proce paths are obtained from 'norm_sample' array, which is * populated in the CPU and copied to GPU. Each thread generates and evaluates one * path. * * It is optimized in the following ways: * <ol> * <li> The reads and writes are coalesced to obtain maximum throughput. </li> * <li> The stock price paths and cash-flows are not stored in device RAM, but * are only used as temporary variables to calculate and store the option values</li> * </ol> * These optimzation are possible due to the following properties: * <ol> * <li>Any permutation of a normally distributed random samples is also * normally distributed</li> * <li>When using only Black-Scholes as the continuation criteria, moving forward or * backward in time gives the same value for the american option, if for a given * path, the optimal exercise time is chosen to be the minimum of all the possible * exercise times.</li> * </ol> */ static __global__ void find_cash_flows_and_option_values(float *option_value_eu, float *option_value_am, int width, int height, InputData indata, float *norm_sample) { int bid = blockIdx.x; int tid = threadIdx.x; int pathid = blockDim.x * blockIdx.x + threadIdx.x; int ts_am = height-1; int oeb = ts_am; // optimal_exercise_boundary for this path float cf_am = 0; // cash flow of american option for this path // the following should be single read and broadcast for all threads I hope float spot_price = indata.S_0; float strike_price = indata.strike_price; float expiry_time = indata.expiry_time; float discount_rate = indata.discount_rate; float volatility = indata.volatility; //if ( pathid == 256 ) // cuPrintf ("\nAt t=0, spot_price = %g, strike_price = %g\n", spot_price, strike_price); // for decicing optimal exercise boundary in american options float put_value = 0; float h = 0; float time = 0; float d1, d2, den, ttm; // assuming uniformaly distributed option exercise times float drift = discount_rate - indata.dividend - 0.5*pow(volatility,2); float del_t = expiry_time/ts_am/365; float sigma = sqrtf(del_t)*volatility; int k = 0; int nt = blockDim.x; int 
start_index = bid*nt*ts_am; // 19 float/int register variables so far while ( k < ts_am ) { // NOTE: (k < oeb) should be a good stopping criteria, stop computation as soon as // you exercise, but can lead to highly divergent code paths. //int index = start_index + k*nt + tid; spot_price = spot_price*exp(drift*del_t + sigma*norm_sample[start_index + k*nt + tid]); //if (index > width*height) // cuPrintf ("index = %d for start_index = %d, k = %d, nt = %d, tid= %d\n", index, start_index, k, nt, tid ); put_value = fmaxf( strike_price - spot_price, 0.0); //put //=======================Black-scholes continuation value========================// int kt = k+1; time = kt*del_t; // is the current time //if ( pathid == 256 ) // cuPrintf ("At t = %g, put value = %g; spot_price = %g, strike_price = %g, index = %d and normrand = %g\n", // time, put_value, spot_price, strike_price, index, norm_sample[index]); ttm = (expiry_time - time)/365; d1 = log(spot_price/strike_price) + ( discount_rate + 0.5*volatility*volatility )*ttm; d2 = log(spot_price/strike_price) + ( discount_rate - 0.5*volatility*volatility )*ttm; den = volatility*sqrtf( ttm ); d1 = d1/den; d2 = d2/den; h = strike_price*exp(-1*discount_rate*ttm)*phi(-1*d2) - spot_price*phi(-1*d1); //===============================================================================// if ( oeb > kt & put_value > h ) { oeb = kt; cf_am = fmaxf( strike_price - spot_price, 0.0); } k++; //if ( tid == 0 && bid == 0) // cuPrintf ("At t = %g, put value = %g, spot_price = %g, h = %g, cf_am = %g\n---\n", // time, put_value, spot_price, h, cf_am); } cf_am = (cf_am == 0)?put_value:cf_am; option_value_eu[ pathid ] = put_value*exp(-1*discount_rate*expiry_time/365 ); option_value_am[ pathid ] = cf_am*exp(-1*discount_rate*oeb*del_t ); } /*! \brief This kernel function finds both european and american option values. * * This kernel function finds both european and american option values for * a given underlying asset. The required normally distributed random samples * for generating asset proce paths are obtained from 'norm_sample' array, which is * populated in the CPU and copied to GPU. Each thread generates and stores mutiple paths. The * number of paths generated by each thread is decided by parameter 'num_paths_per_thread' * which is read from file 'input/options.txt'. * * It is optimized in the following ways: * <ol> * <li> The reads and writes are coalesced to obtain maximum throughput. 
</li> * <li> The stock price paths and cash-flows are not stored in device RAM, but * are only used as temporary variables to calculate and store the option values</li> * </ol> * These optimzation are possible due to the following properties: * <ol> * <li>Normally distributed random samples are obtained on-the-fly using hiprandState_t</li> * <li>When using only Black-Scholes as the continuation criteria, moving forward or * backward in time gives the same value for the american option, if for a given * path, the optimal exercise time is chosen to be the minimum of all the possible * exercise times.</li> * </ol> */ static __global__ void mp_find_cash_flows_and_option_value(float *option_value_eu, float *option_value_am, int width, int height, InputData indata, hiprandState_t *states) { //int bid = blockIdx.x; int tid = threadIdx.x; int ts_am = height-1; // the following should be single read and broadcast for all threads I hope float spot_price = indata.S_0; float strike_price = indata.strike_price; float expiry_time = indata.expiry_time; float discount_rate = indata.discount_rate; float volatility = indata.volatility; //if ( pathid == 256 ) // cuPrintf ("\nAt t=0, spot_price = %g, strike_price = %g\n", spot_price, strike_price); // for deciding optimal exercise boundary in american options float put_value = 0; float h = 0; float time = 0; float d1, d2, den, ttm; // assuming uniformaly distributed option exercise times float drift = discount_rate - indata.dividend - 0.5*pow(volatility,2); float del_t = expiry_time/ts_am/365; float sigma = sqrtf(del_t)*volatility; int m_limit = indata.num_paths_per_thread; int nt = blockDim.x; int pathid = m_limit*blockDim.x * blockIdx.x + threadIdx.x; hiprandState_t localState = states[tid]; // 19 float/int register variables so far #pragma unroll 4 for( int m = 1; m <= m_limit; m++ ) { int oeb = ts_am; // optimal_exercise_boundary for this path float cf_am = 0; // cash flow of american option for this path spot_price = indata.S_0; #pragma unroll 10 for( int k = 0; k < ts_am; k++ ) { spot_price = spot_price*exp(drift*del_t + sigma*hiprand_normal(&localState)); //if (pathid == 100000) // cuPrintf ("pathid = %d, k = %d, nt = %d, tid= %d, m = %d\n", pathid, k, nt, tid, m ); put_value = fmaxf( strike_price - spot_price, 0.0); //put //=======================Black-scholes continuation value========================// int kt = k+1; time = kt*del_t; // is the current time ttm = (expiry_time - time)/365; d1 = log(spot_price/strike_price) + ( discount_rate + 0.5*volatility*volatility )*ttm; d2 = log(spot_price/strike_price) + ( discount_rate - 0.5*volatility*volatility )*ttm; den = volatility*sqrtf( ttm ); d1 = d1/den; d2 = d2/den; h = strike_price*exp(-1*discount_rate*ttm)*phi(-1*d2) - spot_price*phi(-1*d1); //===============================================================================// if ( oeb > kt & put_value > h ) // { oeb = kt; cf_am = put_value; } /*if ( tid == 255 && bid == 0) cuPrintf ("At t = %g, put value = %g, spot_price = %g, h = %g, cf_am = %g\n---\n", time, put_value, spot_price, h, cf_am);*/ } cf_am = (cf_am == 0)?put_value:cf_am; option_value_eu[ pathid ] = put_value*exp(-1*discount_rate*expiry_time/365 ); option_value_am[ pathid ] = cf_am*exp(-1*discount_rate*oeb*del_t ); /*if ( tid == 255 && bid == 0) { cuPrintf (" put_value: %g and cf_am = %g\n", put_value, cf_am); cuPrintf (" option_value_eu[%d] = %g, option_value_am[%d] = %g\n---\n", pathid, option_value_eu[ pathid ], pathid, option_value_am[ pathid ]); }*/ pathid = pathid + nt; } } /*! 
\brief A simple function to catch and disply cuda errors while memory allocation, * deallocation etc. * * A simple function to catch and disply cuda errors while memory allocation, * deallocation etc. */ void checkError(hipError_t err) { if (err != hipSuccess) { fprintf(stderr, "cuda function failed (error code %s)\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } /****************************************************************************/ /*! \brief First wrapper function that calls the kernels. * * This function is the first wrapper to call the required kernel * functions. This function allocates all required memory on GPU, * generates normally distributed random samples (for use in GPU), * then calls the kernel that uses the random samples to compute * asset price paths (one path per thread) followed by the kernel that * finds the optimal exercise boundary for american option. Finally, * it uses thrust::reduce to find the option values. */ extern "C" void _gpu_find_option_values_using_normrand( result_set* r_set ) { #ifdef VERBOSE printf( "\nGPU COMPUTATION using normrand_v1()\n=============================\n"); #endif // read the input file for options relating to the number of paths, number // of discrete time-steps etc. InputData h_indata; FileIO fileIO; fileIO.readInputFile((char*)"./input/options.txt", h_indata); float GPU_t = 0; int num_paths = (h_indata.num_paths%2 == 0)?h_indata.num_paths:h_indata.num_paths+1; // allocate memory to store all Monte Carlo paths, and intialize // the initial value of the asset at t=0. float *d_S = NULL; float *d_x = NULL; float *d_h = NULL; float *d_cash_flow = NULL; float *d_option_value = NULL; float *d_cash_flow_am = NULL; int *d_optimal_exercise_boundary = NULL; int width = num_paths; int height = h_indata.num_time_steps+1; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipEvent_t start0, stop0; hipEventCreate(&start0); hipEventCreate(&stop0); hipEventRecord(start0,0); checkError(hipMalloc((void**)&d_S, width*sizeof(float)*height)); checkError(hipMalloc((void**)&d_x, width*sizeof(float))); checkError(hipMalloc((void**)&d_h, width*sizeof(float))); checkError(hipMalloc((void**)&d_cash_flow, width*sizeof(float))); checkError(hipMalloc((void**)&d_option_value, width*sizeof(float))); checkError(hipMalloc((void**)&d_cash_flow_am, width*sizeof(float))); checkError(hipMalloc((void**)&d_optimal_exercise_boundary, width*sizeof(int))); hipEventRecord(stop0,0); hipEventSynchronize(stop0); hipEventElapsedTime(&GPU_t, start0, stop0); #ifdef VERBOSE printf("\n### normrand_v1(): Time to do initial cudamalloc: %fs\n", GPU_t/1000); #endif int threadsPerBlock = threads_per_block; int blocksPerGrid = (int)ceil( 1.0*width/threadsPerBlock); hipEvent_t start2, stop2; hipEventCreate(&start2); hipEventCreate(&stop2); hipEventRecord(start2,0); random_normal normrnd; normrnd.zigset( 78542121 ); size_t size_norm = width*height*sizeof(float); float *h_norm_sample = (float *) malloc(size_norm); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { h_norm_sample[i*height+j] = normrnd.RNOR(); //printf("h = %f\n", h_norm_sample[i*height+j]); } } #ifdef VERBOSE printf(" - Blocks per Grid = %d\n", blocksPerGrid); printf(" - Threads per Block = %d\n", threadsPerBlock); printf(" - size of d_norm_sample: %d\n", size_norm/4); cudaPrintfInit(); #endif float *d_norm_sample = NULL; checkError(hipMalloc((void**)&d_norm_sample, size_norm)); checkError(hipMemcpy(d_norm_sample, h_norm_sample, size_norm, 
hipMemcpyHostToDevice)); hipLaunchKernelGGL(( generate_asset_price_paths_and_eu_cash_flow), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_S, d_cash_flow, d_option_value, width, height, h_indata, d_norm_sample); hipEventRecord(stop2,0); hipEventSynchronize(stop2); hipEventElapsedTime(&GPU_t, start2, stop2); #ifdef VERBOSE printf("\n### normrand_v1(): Time to generate normal samples in CPU and price paths in GPU: %fs\n", GPU_t/1000); #endif thrust::device_ptr<float> dev_option_value_b(d_option_value); thrust::device_ptr<float> dev_option_value_e = dev_option_value_b + width; float sum = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_eu = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float european_option_value = sum/width; var_eu = (var_eu - pow(european_option_value, 2) )/width; hipEvent_t start3, stop3; hipEventCreate(&start3); hipEventCreate(&stop3); hipEventRecord(start3,0); hipLaunchKernelGGL(( find_optimal_exercise_boundary_and_am_cash_flow), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_S, d_cash_flow, d_option_value, width, height, h_indata, d_x, d_h, d_optimal_exercise_boundary, d_cash_flow_am); hipEventRecord(stop3,0); hipEventSynchronize(stop3); hipEventElapsedTime(&GPU_t, start3, stop3); #ifdef VERBOSE printf("\n### normrand_v1(): Time to generate optimal exercise boundary in GPU: %fs\n", GPU_t/1000); #endif float sum_a = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_am = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float american_option_value = sum_a/width; var_am = (var_am - pow(american_option_value, 2) )/width; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&GPU_t, start, stop); // show memory usage of GPU size_t free_byte ; size_t total_byte ; hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ); if ( hipSuccess != cuda_status ) { printf("normrand_v1() Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); //exit(1); } float delta_am = 1.96*sqrt(var_am/width)/american_option_value; float delta_eu = 1.96*sqrt(var_eu/width)/european_option_value; r_set->american_option_value = american_option_value; r_set->european_option_value = european_option_value; r_set->std_dev_am = sqrt(var_am); r_set->std_dev_eu = sqrt(var_eu); r_set->max_rel_error_am = 100*delta_am/(1-delta_am); r_set->max_rel_error_eu = 100*delta_eu/(1-delta_eu); r_set->net_clock_time = GPU_t/1000; r_set->memory_usage = (total_byte - free_byte)*9.53674e-7; #ifdef VERBOSE r_set->print_details( stdout ); cudaPrintfDisplay(stdout,true); cudaPrintfEnd(); #endif checkError(hipFree(d_S)); checkError(hipFree(d_x)); checkError(hipFree(d_h)); checkError(hipFree(d_cash_flow)); checkError(hipFree(d_option_value)); checkError(hipFree(d_cash_flow_am)); checkError(hipFree(d_optimal_exercise_boundary)); checkError(hipFree(d_norm_sample)); } /****************************************************************************/ /*! \brief Second warpper function that calls the kernels. * * This function is the second wrapper to call the required kernel * functions. 
This function allocates all required memory on GPU, * initializes the 'curandStates' (for use in GPU, one for each thread), * then calls the kernel that uses the curandStates to compute * asset price paths (mutiple path per thread) followed by the kernel that * finds the optimal exercise boundary for american options. Finally, * it uses thrust::reduce to find the option values. */ extern "C" void _gpu_find_option_values_using_curand( result_set* r_set ) { #ifdef VERBOSE printf( "\nGPU COMPUTATION using curand_v1()\n=============================\n"); #endif // read the input file for options relating to the number of paths, number // of discrete time-steps etc. InputData h_indata; FileIO fileIO; fileIO.readInputFile((char*)"./input/options.txt", h_indata); float GPU_t = 0; // allocate memory to store all Monte Carlo paths, and intialize // the initial value of the asset at t=0. int num_paths = (h_indata.num_paths%2 == 0)?h_indata.num_paths:h_indata.num_paths+1; float *d_S = NULL; float *d_x = NULL; float *d_h = NULL; float *d_cash_flow = NULL; float *d_option_value = NULL; float *d_cash_flow_am = NULL; hiprandState_t *d_states = NULL; int *d_optimal_exercise_boundary = NULL; int width = num_paths; int height = h_indata.num_time_steps+1; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipEvent_t startt, stopt; hipEventCreate(&startt); hipEventCreate(&stopt); hipEventRecord(startt,0); h_indata.num_paths_per_thread = pow(2, ceil(log(h_indata.num_paths_per_thread)/log(2))); checkError(hipMalloc((void**)&d_S, width*sizeof(float)*height)); checkError(hipMalloc((void**)&d_x, width*sizeof(float))); checkError(hipMalloc((void**)&d_h, width*sizeof(float))); checkError(hipMalloc((void**)&d_cash_flow, width*sizeof(float))); checkError(hipMalloc((void**)&d_option_value, width*sizeof(float))); checkError(hipMalloc((void**)&d_cash_flow_am, width*sizeof(float))); checkError(hipMalloc((void**)&d_states, ceil(1.0*width/h_indata.num_paths_per_thread)*sizeof(hiprandState_t))); checkError(hipMalloc((void**)&d_optimal_exercise_boundary, width*sizeof(int))); hipEventRecord(stopt,0); hipEventSynchronize(stopt); hipEventElapsedTime(&GPU_t, startt, stopt); #ifdef VERBOSE printf("\n### curand_v1(): Time to initial cudamalloc: %fs\n", GPU_t/1000); #endif int threadsPerBlock = threads_per_block; int blocksPerGrid = (int)ceil( 1.0*width/(threadsPerBlock*h_indata.num_paths_per_thread) ); #ifdef VERBOSE printf(" - Blocks per Grid = %d\n", blocksPerGrid); printf(" - Threads per Block = %d\n", threadsPerBlock); cudaPrintfInit(); #endif hipEvent_t start2, stop2; hipEventCreate(&start2); hipEventCreate(&stop2); hipEventRecord(start2,0); hipLaunchKernelGGL(( generate_states), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, h_indata.random_seed, d_states); hipLaunchKernelGGL(( mp_generate_asset_price_paths_and_eu_cash_flow), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_S, d_cash_flow, d_option_value, width, height, h_indata, d_states); hipEventRecord(stop2,0); hipEventSynchronize(stop2); hipEventElapsedTime(&GPU_t, start2, stop2); #ifdef VERBOSE printf("\n### curand_v1(): Time to generate curandStates and asset price paths on GPU: %fs\n", GPU_t/1000); #endif thrust::device_ptr<float> dev_option_value_b(d_option_value); thrust::device_ptr<float> dev_option_value_e = dev_option_value_b + width; float sum = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_eu = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, 
square<float>(), (float)0, thrust::plus<float>()); float european_option_value = sum/width; var_eu = (var_eu - pow(european_option_value, 2) )/width; hipEvent_t start3, stop3; hipEventCreate(&start3); hipEventCreate(&stop3); hipEventRecord(start3,0); hipLaunchKernelGGL(( find_optimal_exercise_boundary_and_am_cash_flow), dim3(blocksPerGrid*h_indata.num_paths_per_thread), dim3(threadsPerBlock), 0, 0, d_S, d_cash_flow, d_option_value, width, height, h_indata, d_x, d_h, d_optimal_exercise_boundary, d_cash_flow_am); hipEventRecord(stop3,0); hipEventSynchronize(stop3); hipEventElapsedTime(&GPU_t, start3, stop3); #ifdef VERBOSE printf("\n### curand_v1(): Time to generate optimal exercise boundary on GPU: %fs\n", GPU_t/1000); #endif float sum_a = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_am = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float american_option_value = sum_a/width; var_am = (var_am - pow(american_option_value, 2) )/width; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&GPU_t, start, stop); // show memory usage of GPU size_t free_byte ; size_t total_byte ; hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ); if ( hipSuccess != cuda_status ) { printf("curand_v1() Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); } float delta_am = 1.96*sqrt(var_am/width)/american_option_value; float delta_eu = 1.96*sqrt(var_eu/width)/european_option_value; r_set->american_option_value = american_option_value; r_set->european_option_value = european_option_value; r_set->std_dev_am = sqrt(var_am); r_set->std_dev_eu = sqrt(var_eu); r_set->max_rel_error_am = 100*delta_am/(1-delta_am); r_set->max_rel_error_eu = 100*delta_eu/(1-delta_eu); r_set->net_clock_time = GPU_t/1000; r_set->memory_usage = (total_byte - free_byte)*9.53674e-7; #ifdef VERBOSE r_set->print_details( stdout ); cudaPrintfDisplay(stdout,true); cudaPrintfEnd(); #endif checkError(hipFree(d_S)); checkError(hipFree(d_x)); checkError(hipFree(d_h)); checkError(hipFree(d_cash_flow)); checkError(hipFree(d_option_value)); checkError(hipFree(d_cash_flow_am)); checkError(hipFree(d_optimal_exercise_boundary)); checkError(hipFree(d_states)); } /****************************************************************************/ /*! \brief Third wrapper function that calls the kernels. * * This function is the first wrapper to call the required kernel * functions. This function allocates all required memory on GPU, * generates normally distributed random samples (for use in GPU), * then calls the kernel that uses the random samples to compute * asset price paths (one path per thread) and the kernel that * finds the optimal exercise boundary for american option. Finally, * it uses thrust::reduce to find the option values. */ extern "C" void _gpu_find_option_values_using_normrand_v2( result_set* r_set ) { #ifdef VERBOSE printf( "\nGPU COMPUTATION using normrand_v2()\n=============================\n"); #endif // read the input file for options relating to the number of paths, number // of discrete time-steps etc. InputData h_indata; FileIO fileIO; fileIO.readInputFile((char*)"./input/options.txt", h_indata); float GPU_t = 0; // allocate memory to store all Monte Carlo paths, and intialize // the initial value of the asset at t=0. 
int num_paths = (h_indata.num_paths%2 == 0)?h_indata.num_paths:h_indata.num_paths+1; float *d_option_value = NULL; float *d_option_value_am = NULL; int width = num_paths; int height = h_indata.num_time_steps+1; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipEvent_t startt, stopt; hipEventCreate(&startt); hipEventCreate(&stopt); hipEventRecord(startt,0); checkError(hipMalloc((void**)&d_option_value, width*sizeof(float))); checkError(hipMalloc((void**)&d_option_value_am, width*sizeof(float))); hipEventRecord(stopt,0); hipEventSynchronize(stopt); hipEventElapsedTime(&GPU_t, startt, stopt); #ifdef VERBOSE printf("\n### normrand_v2(): Time to initial cudamalloc: %fs\n", GPU_t/1000); #endif int threadsPerBlock = threads_per_block; int blocksPerGrid = (int)ceil( 1.0*width/threadsPerBlock); hipEvent_t start1, stop1; hipEventCreate(&start1); hipEventCreate(&stop1); hipEventRecord(start1,0); random_normal normrnd; normrnd.zigset( 78542121 ); size_t size_norm = width*height*sizeof(float); float *h_norm_sample = (float *) malloc(size_norm); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { h_norm_sample[i*height+j] = normrnd.RNOR(); } } /*for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { h_norm_sample[j*width + i] = normrnd.RNOR(); } }*/ hipEventRecord(stop1,0); hipEventSynchronize(stop1); hipEventElapsedTime(&GPU_t, start1, stop1); #ifdef VERBOSE printf("\n### normrand_v2(): Time to generate normal samples on CPU: %fs\n", GPU_t/1000); printf(" - Blocks per Grid = %d\n", blocksPerGrid); printf(" - Threads per Block = %d\n", threadsPerBlock); printf(" - num-elemebts in d_norm_sample: %d\n", size_norm/4); cudaPrintfInit(); #endif float *d_norm_sample = NULL; checkError(hipMalloc((void**)&d_norm_sample, size_norm)); checkError(hipMemcpy(d_norm_sample, h_norm_sample, size_norm, hipMemcpyHostToDevice)); hipEvent_t start2, stop2; hipEventCreate(&start2); hipEventCreate(&stop2); hipEventRecord(start2,0); hipLaunchKernelGGL(( find_cash_flows_and_option_values), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_option_value, d_option_value_am, width, height, h_indata, d_norm_sample); hipEventRecord(stop2,0); hipEventSynchronize(stop2); hipEventElapsedTime(&GPU_t, start2, stop2); #ifdef VERBOSE printf("\n### normrand_v2(): Time to generate price paths and option values on GPU: %fs\n", GPU_t/1000); #endif thrust::device_ptr<float> dev_option_value_b(d_option_value); thrust::device_ptr<float> dev_option_value_e = dev_option_value_b + width; float sum = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_eu = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float european_option_value = sum/width; var_eu = (var_eu - pow(european_option_value, 2) )/width; thrust::device_ptr<float> dev_option_value_am_b(d_option_value_am); thrust::device_ptr<float> dev_option_value_am_e = dev_option_value_am_b + width; float sum_a = thrust::reduce(dev_option_value_am_b, dev_option_value_am_e, (float)0, thrust::plus<float>()); float var_am = thrust::transform_reduce(dev_option_value_am_b, dev_option_value_am_e, square<float>(), (float)0, thrust::plus<float>()); float american_option_value = sum_a/width; var_am = (var_am - pow(american_option_value, 2) )/width; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&GPU_t, start, stop); // show memory usage of GPU size_t free_byte ; size_t total_byte ; hipError_t 
cuda_status = hipMemGetInfo( &free_byte, &total_byte ); if ( hipSuccess != cuda_status ) { printf("normrand_v2() Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); //exit(1); } float delta_am = 1.96*sqrt(var_am/width)/american_option_value; float delta_eu = 1.96*sqrt(var_eu/width)/european_option_value; r_set->american_option_value = american_option_value; r_set->european_option_value = european_option_value; r_set->std_dev_am = sqrt(var_am); r_set->std_dev_eu = sqrt(var_eu); r_set->max_rel_error_am = 100*delta_am/(1-delta_am); r_set->max_rel_error_eu = 100*delta_eu/(1-delta_eu); r_set->net_clock_time = GPU_t/1000; r_set->memory_usage = (total_byte - free_byte)*9.53674e-7; #ifdef VERBOSE r_set->print_details( stdout ); cudaPrintfDisplay(stdout,true); cudaPrintfEnd(); #endif checkError(hipFree(d_option_value)); checkError(hipFree(d_option_value_am)); checkError(hipFree(d_norm_sample)); } /****************************************************************************/ /*! \brief Fourth warpper function that calls the kernels. * * This function is the fourth wrapper to call the required kernel * functions. This function allocates all required memory on GPU, * initializes the 'curandStates' (for use in GPU, one for each thread), * then calls the kernel that uses the curandStates to compute * asset price paths (mutiple path per thread) also * finds the optimal exercise boundary for american options. Finally, * it uses thrust::reduce to find the option values. */ extern "C" void _gpu_find_option_values_using_curand_v2( result_set* r_set ) { #ifdef VERBOSE printf( "\nGPU COMPUTATION using curand_v2()\n=============================\n"); #endif // read the input file for options relating to the number of paths, number // of discrete time-steps etc. InputData h_indata; FileIO fileIO; fileIO.readInputFile((char*)"./input/options.txt", h_indata); float GPU_t = 0; // allocate memory to store all Monte Carlo paths, and intialize // the initial value of the asset at t=0. 
int num_paths = (h_indata.num_paths%2 == 0)?h_indata.num_paths:h_indata.num_paths+1; float *d_option_value = NULL; float *d_option_value_am = NULL; hiprandState_t *d_states = NULL; int width = num_paths; int height = h_indata.num_time_steps+1; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipEvent_t startt, stopt; hipEventCreate(&startt); hipEventCreate(&stopt); hipEventRecord(startt,0); h_indata.num_paths_per_thread = pow(2, ceil(log(h_indata.num_paths_per_thread)/log(2))); checkError(hipMalloc((void**)&d_option_value, width*sizeof(float))); checkError(hipMalloc((void**)&d_option_value_am, width*sizeof(float))); checkError(hipMalloc((void**)&d_states, ceil(1.0*width/h_indata.num_paths_per_thread)*sizeof(hiprandState_t))); hipEventRecord(stopt,0); hipEventSynchronize(stopt); hipEventElapsedTime(&GPU_t, startt, stopt); #ifdef VERBOSE printf("\n### curand_v2(): Time to initial cudamalloc: %fs\n", GPU_t/1000); #endif int threadsPerBlock = threads_per_block; int blocksPerGrid = (int)ceil( 1.0*width/(threadsPerBlock*h_indata.num_paths_per_thread) ); #ifdef VERBOSE printf(" - Blocks per Grid = %d\n", blocksPerGrid); printf(" - Threads per Block = %d\n", threadsPerBlock); cudaPrintfInit(); #endif hipEvent_t start1, stop1; hipEventCreate(&start1); hipEventCreate(&stop1); hipEventRecord(start1,0); hipLaunchKernelGGL(( generate_states), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, h_indata.random_seed, d_states); hipEventRecord(stop1,0); hipEventSynchronize(stop1); hipEventElapsedTime(&GPU_t, start1, stop1); #ifdef VERBOSE printf("\n### curand_v2(): Time to initial crandStates on GPU: %fs\n", GPU_t/1000); #endif hipEvent_t start2, stop2; hipEventCreate(&start2); hipEventCreate(&stop2); hipEventRecord(start2,0); hipLaunchKernelGGL(( mp_find_cash_flows_and_option_value), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_option_value, d_option_value_am, width, height, h_indata, d_states); hipEventRecord(stop2,0); hipEventSynchronize(stop2); hipEventElapsedTime(&GPU_t, start2, stop2); #ifdef VERBOSE printf("\n### curand_v2(): Time to generate price paths and option values on GPU: %fs\n", GPU_t/1000); #endif thrust::device_ptr<float> dev_option_value_b(d_option_value); thrust::device_ptr<float> dev_option_value_e = dev_option_value_b + width; float sum = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_eu = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float european_option_value = sum/width; var_eu = (var_eu - pow(european_option_value, 2) )/width; thrust::device_ptr<float> dev_option_value_am_b(d_option_value_am); thrust::device_ptr<float> dev_option_value_am_e = dev_option_value_am_b + width; float sum_a = thrust::reduce(dev_option_value_am_b, dev_option_value_am_e, (float)0, thrust::plus<float>()); float var_am = thrust::transform_reduce(dev_option_value_am_b, dev_option_value_am_e, square<float>(), (float)0, thrust::plus<float>()); float american_option_value = sum_a/width; var_am = (var_am - pow(american_option_value, 2) )/width; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&GPU_t, start, stop); // show memory usage of GPU size_t free_byte ; size_t total_byte ; hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ); if ( hipSuccess != cuda_status ) { printf("curand_v2() error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); //exit(1); } float delta_am = 
1.96*sqrt(var_am/width)/american_option_value; float delta_eu = 1.96*sqrt(var_eu/width)/european_option_value; r_set->american_option_value = american_option_value; r_set->european_option_value = european_option_value; r_set->std_dev_am = sqrt(var_am); r_set->std_dev_eu = sqrt(var_eu); r_set->max_rel_error_am = 100*delta_am/(1-delta_am); r_set->max_rel_error_eu = 100*delta_eu/(1-delta_eu); r_set->net_clock_time = GPU_t/1000; r_set->memory_usage = (total_byte - free_byte)*9.53674e-7; #ifdef VERBOSE r_set->print_details( stdout ); cudaPrintfDisplay(stdout,true); cudaPrintfEnd(); #endif checkError(hipFree(d_option_value)); checkError(hipFree(d_option_value_am)); checkError(hipFree(d_states)); }
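Note on the continuation criterion used above: get_black_scholes_continuation_value_gpu and the inlined blocks in the find_cash_flows_and_option_values kernels compute the same quantity, the Black-Scholes value of the remaining European put, which serves as the continuation value h. In the code's conventions (expiry_time and the current time t are in days, hence the division by 365, with tau the remaining time in years):

    d_{1,2} = \frac{\ln(S/K) + \left(r \pm \tfrac{1}{2}\sigma^{2}\right)\tau}{\sigma\sqrt{\tau}}, \qquad h(S,t) = K\,e^{-r\tau}\,\Phi(-d_{2}) - S\,\Phi(-d_{1}), \qquad \tau = \frac{T - t}{365}

Here K is strike_price, r is discount_rate, sigma is volatility, and Phi is the normal CDF implemented by phi(). A path is flagged for early exercise at the earliest step at which the immediate put payoff exceeds h.

For context, a minimal, hypothetical host driver for the wrappers above is sketched next. It is not part of the original file; it assumes that option_kernel.h declares result_set (with the members assigned above) along with the wrapper prototypes, and that ./input/options.txt is present.

// Hypothetical driver (sketch only): exercises two of the extern "C" wrappers defined above.
#include <cstdio>
#include "option_kernel.h"   // assumed to define result_set and InputData

extern "C" void _gpu_find_option_values_using_normrand( result_set* r_set );
extern "C" void _gpu_find_option_values_using_curand( result_set* r_set );

int main()
{
    result_set r;  // assumed default-constructible; the wrappers fill in its fields

    _gpu_find_option_values_using_normrand(&r);   // CPU ziggurat samples, one path per thread
    printf("normrand_v1: european = %f, american = %f\n",
           r.european_option_value, r.american_option_value);

    _gpu_find_option_values_using_curand(&r);     // hiprand states, multiple paths per thread
    printf("curand_v1:   european = %f, american = %f\n",
           r.european_option_value, r.american_option_value);

    return 0;
}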
9f665386fafd696f1084b62aad08f891ae4956a9.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <curand.h> #include "option_kernel.h" #include "../cuPrintf.cu" #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <cmath> // number of threads per block int threads_per_block = 256; /*! \brief Used by thrust reduce for squaring * * used by threust reduce for squaring */ template<typename T> struct square { __host__ __device__ T operator()(const T& x) const{ return x*x; } }; /*! \brief A simple function to calculate the CDF at x. * * A simple function to calculate the CDF of normal distribution at x. * Using sqrt(2) = 1.414213562373 */ __device__ float phi(float x) { return 0.5*(1 + erf(x/1.4142136)); } /*! \brief Evaluates the black-Scholes formula for given argument values. * * Evaluates the black-Scholes formula for given argument values. */ __device__ float get_black_scholes_continuation_value_gpu(float x, float time, int height, InputData indata ) { float del_t = indata.expiry_time/(height-1)/365; float t = time*del_t; float d1, d2, den; float ttm = (indata.expiry_time - t)/365; d1 = log(x/indata.strike_price) + ( indata.discount_rate + 0.5*indata.volatility*indata.volatility )*ttm; d2 = log(x/indata.strike_price) + ( indata.discount_rate - 0.5*indata.volatility*indata.volatility )*ttm; den = indata.volatility*sqrtf( ttm ); d1 = d1/den; d2 = d2/den; return indata.strike_price*exp(-1*indata.discount_rate*ttm)*phi(-1*d2) - x*phi(-1*d1); } /*! \brief This kernel function generates asset price paths using normal smaples from CPU. * * This kernel function generates asset price paths for the underlying stock under * risk-neutral assumption. The asset price paths are stored in Device RAM and is used * by the 'find_optimal_exercise_boundary_and_am_cash_flow' kernel. The stock price * paths follow a Brownian motion. Each thread generates and stores one path. At the * end, this kernel also generates the cash-flows for the corresponding european option, * which are later evaluated to find the value of European option at t=0. The required * normally distributed random samples are obtained from 'norm_sample' array, which is * populated in the CPU and copied to GPU. */ static __global__ void generate_asset_price_paths_and_eu_cash_flow(float *S, float *cash_flow, float *option_value, int width, int height, InputData indata, float *norm_sample) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float drift = indata.discount_rate - indata.dividend - 0.5*pow(indata.volatility,2); float del_t = indata.expiry_time/(height-1)/365; float sigma = sqrtf(del_t)*indata.volatility; S[tid*height] = indata.S_0; float temp = indata.S_0; #pragma unroll 10 for (int j = 1; j < height; j++ ) { S[tid*height+j] = temp = temp*exp(drift*del_t + sigma*norm_sample[tid*height+j]); } // Find cash flow and option value for corresponding european options int expiry_index = height-1; float discount_eu = exp(-1*indata.discount_rate*indata.expiry_time/365 ); float cash_temp; cash_flow[tid] = cash_temp = fmaxf(indata.strike_price - S[tid*height+expiry_index], 0.0); //put option_value[tid] = cash_temp*discount_eu; } /*! \brief Initialize states for random number generation on GPU * * Initialize states for random number generation on GPU. Each state is used by * a thread to get a sequence of normally distributed random samples. The seed used * for initialaization is the same for all threads and is read from 'input/option.txt'. 
*/ static __global__ void generate_states(float seed, curandState *state){ int tid = blockDim.x * blockIdx.x + threadIdx.x; curand_init(seed, tid, 0, &state[tid]); } /*! \brief Finds the optimal exercise boundary for american option * * This kernel finds the optimal exercise boundary for a given american * option using Black-Scholes as the continuation criteria. The details are * given in the paper'http://arxiv.org/ftp/arxiv/papers/1205/1205.0106.pdf'. */ static __global__ void find_optimal_exercise_boundary_and_am_cash_flow(float *S, float *cash_flow, float *option_value, int width, int height, InputData indata, float *x, float *h, int *optimal_exercise_boundary, float *cash_flow_am) { int tid = blockDim.x * blockIdx.x + threadIdx.x; int expiry_index = height-1; float del_t = indata.expiry_time/(height-1)/365; // discount for merican counterpart float discount = exp(-1*indata.discount_rate*del_t ); float put_value = 0; float xtemp = 0; float htemp = 0; // for all other times when the option can be exercised, we comapre the // value of exercising and continuation value to find optimal exercise boundary optimal_exercise_boundary[tid] = expiry_index; for ( int time = expiry_index-1; time >= 1; time-- ) // move back in time { put_value = fmaxf( indata.strike_price - S[tid*height+time], 0.0); //put xtemp = S[tid*height+time]; cash_flow[tid] = put_value; htemp = get_black_scholes_continuation_value_gpu(xtemp, time, height, indata); if ( cash_flow[tid] > htemp ) { optimal_exercise_boundary[tid] = time; cash_flow_am[tid] = fmaxf(indata.strike_price - S[tid*height+time], 0.0); } } cash_flow_am[tid] = fmaxf(indata.strike_price - S[tid*height+optimal_exercise_boundary[tid]], 0.0); discount = exp(-1*indata.discount_rate*optimal_exercise_boundary[tid]*del_t ); option_value[tid] = cash_flow_am[tid]*discount;//*/ } /*! \brief This kernel function generates asset price paths using normal smaples from "curand.h". * * This kernel function generates asset price paths for the underlying stock under * risk-neutral assumption. The asset price paths are stored in Device RAM and is used * by the 'find_optimal_exercise_boundary_and_am_cash_flow' kernel. The stock price * paths follow a Brownian motion. Each thread generates and stores mutiple paths. The * number of paths generated by each thread is decided by parameter 'num_paths_per_thread' * which is read from file 'input/options.txt'. At the end, this kernel also generates the * cash-flows for the corresponding european option, which are later evaluated to find the * value of European option at t=0. Each thread uses a 'curandState' to obtain normally * distributed random samples. 
*/ static __global__ void mp_generate_asset_price_paths_and_eu_cash_flow(float *S, float *cash_flow, float *option_value, int width, int height, InputData indata, curandState *states) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float drift = indata.discount_rate - indata.dividend - 0.5*pow(indata.volatility,2); float del_t = indata.expiry_time/(height-1)/365; float sigma = sqrtf(del_t)*indata.volatility; float S_0 = indata.S_0; float temp; curandState localState = states[tid]; int m_limit = indata.num_paths_per_thread; #pragma unroll 4 for ( int m = 0; m < m_limit; m++) { S[( m_limit*tid +m )*height] = S_0; temp = S_0; #pragma unroll 10 for (int j = 1; j < height; j++ ) { S[( m_limit*tid +m )*height+j] = temp = temp *exp(drift*del_t + sigma*curand_normal(&localState)); } } int expiry_index = height-1; // Find cash flow and option value for corresponding european options float discount_eu = exp(-1*indata.discount_rate*indata.expiry_time/365 ); float cash_temp; #pragma unroll 4 for ( int m = 0; m < m_limit; m++) { cash_flow[(m_limit*tid+m)] = cash_temp = fmaxf(indata.strike_price - S[(m_limit*tid + m)*height+expiry_index], 0.0); //put option_value[(m_limit*tid+m)] = cash_temp*discount_eu; } } /*! \brief This kernel function finds both european and american option values. * * This kernel function finds both european and american option values for * a given underlying asset. The required normally distributed random samples * for generating asset proce paths are obtained from 'norm_sample' array, which is * populated in the CPU and copied to GPU. Each thread generates and evaluates one * path. * * It is optimized in the following ways: * <ol> * <li> The reads and writes are coalesced to obtain maximum throughput. </li> * <li> The stock price paths and cash-flows are not stored in device RAM, but * are only used as temporary variables to calculate and store the option values</li> * </ol> * These optimzation are possible due to the following properties: * <ol> * <li>Any permutation of a normally distributed random samples is also * normally distributed</li> * <li>When using only Black-Scholes as the continuation criteria, moving forward or * backward in time gives the same value for the american option, if for a given * path, the optimal exercise time is chosen to be the minimum of all the possible * exercise times.</li> * </ol> */ static __global__ void find_cash_flows_and_option_values(float *option_value_eu, float *option_value_am, int width, int height, InputData indata, float *norm_sample) { int bid = blockIdx.x; int tid = threadIdx.x; int pathid = blockDim.x * blockIdx.x + threadIdx.x; int ts_am = height-1; int oeb = ts_am; // optimal_exercise_boundary for this path float cf_am = 0; // cash flow of american option for this path // the following should be single read and broadcast for all threads I hope float spot_price = indata.S_0; float strike_price = indata.strike_price; float expiry_time = indata.expiry_time; float discount_rate = indata.discount_rate; float volatility = indata.volatility; //if ( pathid == 256 ) // cuPrintf ("\nAt t=0, spot_price = %g, strike_price = %g\n", spot_price, strike_price); // for decicing optimal exercise boundary in american options float put_value = 0; float h = 0; float time = 0; float d1, d2, den, ttm; // assuming uniformaly distributed option exercise times float drift = discount_rate - indata.dividend - 0.5*pow(volatility,2); float del_t = expiry_time/ts_am/365; float sigma = sqrtf(del_t)*volatility; int k = 0; int nt = blockDim.x; int 
start_index = bid*nt*ts_am; // 19 float/int register variables so far while ( k < ts_am ) { // NOTE: (k < oeb) should be a good stopping criteria, stop computation as soon as // you exercise, but can lead to highly divergent code paths. //int index = start_index + k*nt + tid; spot_price = spot_price*exp(drift*del_t + sigma*norm_sample[start_index + k*nt + tid]); //if (index > width*height) // cuPrintf ("index = %d for start_index = %d, k = %d, nt = %d, tid= %d\n", index, start_index, k, nt, tid ); put_value = fmaxf( strike_price - spot_price, 0.0); //put //=======================Black-scholes continuation value========================// int kt = k+1; time = kt*del_t; // is the current time //if ( pathid == 256 ) // cuPrintf ("At t = %g, put value = %g; spot_price = %g, strike_price = %g, index = %d and normrand = %g\n", // time, put_value, spot_price, strike_price, index, norm_sample[index]); ttm = (expiry_time - time)/365; d1 = log(spot_price/strike_price) + ( discount_rate + 0.5*volatility*volatility )*ttm; d2 = log(spot_price/strike_price) + ( discount_rate - 0.5*volatility*volatility )*ttm; den = volatility*sqrtf( ttm ); d1 = d1/den; d2 = d2/den; h = strike_price*exp(-1*discount_rate*ttm)*phi(-1*d2) - spot_price*phi(-1*d1); //===============================================================================// if ( oeb > kt & put_value > h ) { oeb = kt; cf_am = fmaxf( strike_price - spot_price, 0.0); } k++; //if ( tid == 0 && bid == 0) // cuPrintf ("At t = %g, put value = %g, spot_price = %g, h = %g, cf_am = %g\n---\n", // time, put_value, spot_price, h, cf_am); } cf_am = (cf_am == 0)?put_value:cf_am; option_value_eu[ pathid ] = put_value*exp(-1*discount_rate*expiry_time/365 ); option_value_am[ pathid ] = cf_am*exp(-1*discount_rate*oeb*del_t ); } /*! \brief This kernel function finds both european and american option values. * * This kernel function finds both european and american option values for * a given underlying asset. The required normally distributed random samples * for generating asset proce paths are obtained from 'norm_sample' array, which is * populated in the CPU and copied to GPU. Each thread generates and stores mutiple paths. The * number of paths generated by each thread is decided by parameter 'num_paths_per_thread' * which is read from file 'input/options.txt'. * * It is optimized in the following ways: * <ol> * <li> The reads and writes are coalesced to obtain maximum throughput. 
</li> * <li> The stock price paths and cash-flows are not stored in device RAM, but * are only used as temporary variables to calculate and store the option values</li> * </ol> * These optimzation are possible due to the following properties: * <ol> * <li>Normally distributed random samples are obtained on-the-fly using curandState</li> * <li>When using only Black-Scholes as the continuation criteria, moving forward or * backward in time gives the same value for the american option, if for a given * path, the optimal exercise time is chosen to be the minimum of all the possible * exercise times.</li> * </ol> */ static __global__ void mp_find_cash_flows_and_option_value(float *option_value_eu, float *option_value_am, int width, int height, InputData indata, curandState *states) { //int bid = blockIdx.x; int tid = threadIdx.x; int ts_am = height-1; // the following should be single read and broadcast for all threads I hope float spot_price = indata.S_0; float strike_price = indata.strike_price; float expiry_time = indata.expiry_time; float discount_rate = indata.discount_rate; float volatility = indata.volatility; //if ( pathid == 256 ) // cuPrintf ("\nAt t=0, spot_price = %g, strike_price = %g\n", spot_price, strike_price); // for deciding optimal exercise boundary in american options float put_value = 0; float h = 0; float time = 0; float d1, d2, den, ttm; // assuming uniformaly distributed option exercise times float drift = discount_rate - indata.dividend - 0.5*pow(volatility,2); float del_t = expiry_time/ts_am/365; float sigma = sqrtf(del_t)*volatility; int m_limit = indata.num_paths_per_thread; int nt = blockDim.x; int pathid = m_limit*blockDim.x * blockIdx.x + threadIdx.x; curandState localState = states[tid]; // 19 float/int register variables so far #pragma unroll 4 for( int m = 1; m <= m_limit; m++ ) { int oeb = ts_am; // optimal_exercise_boundary for this path float cf_am = 0; // cash flow of american option for this path spot_price = indata.S_0; #pragma unroll 10 for( int k = 0; k < ts_am; k++ ) { spot_price = spot_price*exp(drift*del_t + sigma*curand_normal(&localState)); //if (pathid == 100000) // cuPrintf ("pathid = %d, k = %d, nt = %d, tid= %d, m = %d\n", pathid, k, nt, tid, m ); put_value = fmaxf( strike_price - spot_price, 0.0); //put //=======================Black-scholes continuation value========================// int kt = k+1; time = kt*del_t; // is the current time ttm = (expiry_time - time)/365; d1 = log(spot_price/strike_price) + ( discount_rate + 0.5*volatility*volatility )*ttm; d2 = log(spot_price/strike_price) + ( discount_rate - 0.5*volatility*volatility )*ttm; den = volatility*sqrtf( ttm ); d1 = d1/den; d2 = d2/den; h = strike_price*exp(-1*discount_rate*ttm)*phi(-1*d2) - spot_price*phi(-1*d1); //===============================================================================// if ( oeb > kt & put_value > h ) // { oeb = kt; cf_am = put_value; } /*if ( tid == 255 && bid == 0) cuPrintf ("At t = %g, put value = %g, spot_price = %g, h = %g, cf_am = %g\n---\n", time, put_value, spot_price, h, cf_am);*/ } cf_am = (cf_am == 0)?put_value:cf_am; option_value_eu[ pathid ] = put_value*exp(-1*discount_rate*expiry_time/365 ); option_value_am[ pathid ] = cf_am*exp(-1*discount_rate*oeb*del_t ); /*if ( tid == 255 && bid == 0) { cuPrintf (" put_value: %g and cf_am = %g\n", put_value, cf_am); cuPrintf (" option_value_eu[%d] = %g, option_value_am[%d] = %g\n---\n", pathid, option_value_eu[ pathid ], pathid, option_value_am[ pathid ]); }*/ pathid = pathid + nt; } } /*! 
\brief A simple function to catch and disply cuda errors while memory allocation, * deallocation etc. * * A simple function to catch and disply cuda errors while memory allocation, * deallocation etc. */ void checkError(cudaError_t err) { if (err != cudaSuccess) { fprintf(stderr, "cuda function failed (error code %s)\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /****************************************************************************/ /*! \brief First wrapper function that calls the kernels. * * This function is the first wrapper to call the required kernel * functions. This function allocates all required memory on GPU, * generates normally distributed random samples (for use in GPU), * then calls the kernel that uses the random samples to compute * asset price paths (one path per thread) followed by the kernel that * finds the optimal exercise boundary for american option. Finally, * it uses thrust::reduce to find the option values. */ extern "C" void _gpu_find_option_values_using_normrand( result_set* r_set ) { #ifdef VERBOSE printf( "\nGPU COMPUTATION using normrand_v1()\n=============================\n"); #endif // read the input file for options relating to the number of paths, number // of discrete time-steps etc. InputData h_indata; FileIO fileIO; fileIO.readInputFile((char*)"./input/options.txt", h_indata); float GPU_t = 0; int num_paths = (h_indata.num_paths%2 == 0)?h_indata.num_paths:h_indata.num_paths+1; // allocate memory to store all Monte Carlo paths, and intialize // the initial value of the asset at t=0. float *d_S = NULL; float *d_x = NULL; float *d_h = NULL; float *d_cash_flow = NULL; float *d_option_value = NULL; float *d_cash_flow_am = NULL; int *d_optimal_exercise_boundary = NULL; int width = num_paths; int height = h_indata.num_time_steps+1; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); cudaEvent_t start0, stop0; cudaEventCreate(&start0); cudaEventCreate(&stop0); cudaEventRecord(start0,0); checkError(cudaMalloc((void**)&d_S, width*sizeof(float)*height)); checkError(cudaMalloc((void**)&d_x, width*sizeof(float))); checkError(cudaMalloc((void**)&d_h, width*sizeof(float))); checkError(cudaMalloc((void**)&d_cash_flow, width*sizeof(float))); checkError(cudaMalloc((void**)&d_option_value, width*sizeof(float))); checkError(cudaMalloc((void**)&d_cash_flow_am, width*sizeof(float))); checkError(cudaMalloc((void**)&d_optimal_exercise_boundary, width*sizeof(int))); cudaEventRecord(stop0,0); cudaEventSynchronize(stop0); cudaEventElapsedTime(&GPU_t, start0, stop0); #ifdef VERBOSE printf("\n### normrand_v1(): Time to do initial cudamalloc: %fs\n", GPU_t/1000); #endif int threadsPerBlock = threads_per_block; int blocksPerGrid = (int)ceil( 1.0*width/threadsPerBlock); cudaEvent_t start2, stop2; cudaEventCreate(&start2); cudaEventCreate(&stop2); cudaEventRecord(start2,0); random_normal normrnd; normrnd.zigset( 78542121 ); size_t size_norm = width*height*sizeof(float); float *h_norm_sample = (float *) malloc(size_norm); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { h_norm_sample[i*height+j] = normrnd.RNOR(); //printf("h = %f\n", h_norm_sample[i*height+j]); } } #ifdef VERBOSE printf(" - Blocks per Grid = %d\n", blocksPerGrid); printf(" - Threads per Block = %d\n", threadsPerBlock); printf(" - size of d_norm_sample: %d\n", size_norm/4); cudaPrintfInit(); #endif float *d_norm_sample = NULL; checkError(cudaMalloc((void**)&d_norm_sample, size_norm)); checkError(cudaMemcpy(d_norm_sample, h_norm_sample, 
size_norm, cudaMemcpyHostToDevice)); generate_asset_price_paths_and_eu_cash_flow<<<blocksPerGrid,threadsPerBlock>>>(d_S, d_cash_flow, d_option_value, width, height, h_indata, d_norm_sample); cudaEventRecord(stop2,0); cudaEventSynchronize(stop2); cudaEventElapsedTime(&GPU_t, start2, stop2); #ifdef VERBOSE printf("\n### normrand_v1(): Time to generate normal samples in CPU and price paths in GPU: %fs\n", GPU_t/1000); #endif thrust::device_ptr<float> dev_option_value_b(d_option_value); thrust::device_ptr<float> dev_option_value_e = dev_option_value_b + width; float sum = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_eu = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float european_option_value = sum/width; var_eu = (var_eu - pow(european_option_value, 2) )/width; cudaEvent_t start3, stop3; cudaEventCreate(&start3); cudaEventCreate(&stop3); cudaEventRecord(start3,0); find_optimal_exercise_boundary_and_am_cash_flow<<<blocksPerGrid, threadsPerBlock>>>(d_S, d_cash_flow, d_option_value, width, height, h_indata, d_x, d_h, d_optimal_exercise_boundary, d_cash_flow_am); cudaEventRecord(stop3,0); cudaEventSynchronize(stop3); cudaEventElapsedTime(&GPU_t, start3, stop3); #ifdef VERBOSE printf("\n### normrand_v1(): Time to generate optimal exercise boundary in GPU: %fs\n", GPU_t/1000); #endif float sum_a = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_am = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float american_option_value = sum_a/width; var_am = (var_am - pow(american_option_value, 2) )/width; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&GPU_t, start, stop); // show memory usage of GPU size_t free_byte ; size_t total_byte ; cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ); if ( cudaSuccess != cuda_status ) { printf("normrand_v1() Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); //exit(1); } float delta_am = 1.96*sqrt(var_am/width)/american_option_value; float delta_eu = 1.96*sqrt(var_eu/width)/european_option_value; r_set->american_option_value = american_option_value; r_set->european_option_value = european_option_value; r_set->std_dev_am = sqrt(var_am); r_set->std_dev_eu = sqrt(var_eu); r_set->max_rel_error_am = 100*delta_am/(1-delta_am); r_set->max_rel_error_eu = 100*delta_eu/(1-delta_eu); r_set->net_clock_time = GPU_t/1000; r_set->memory_usage = (total_byte - free_byte)*9.53674e-7; #ifdef VERBOSE r_set->print_details( stdout ); cudaPrintfDisplay(stdout,true); cudaPrintfEnd(); #endif checkError(cudaFree(d_S)); checkError(cudaFree(d_x)); checkError(cudaFree(d_h)); checkError(cudaFree(d_cash_flow)); checkError(cudaFree(d_option_value)); checkError(cudaFree(d_cash_flow_am)); checkError(cudaFree(d_optimal_exercise_boundary)); checkError(cudaFree(d_norm_sample)); } /****************************************************************************/ /*! \brief Second warpper function that calls the kernels. * * This function is the second wrapper to call the required kernel * functions. 
This function allocates all required memory on GPU, * initializes the 'curandStates' (for use in GPU, one for each thread), * then calls the kernel that uses the curandStates to compute * asset price paths (mutiple path per thread) followed by the kernel that * finds the optimal exercise boundary for american options. Finally, * it uses thrust::reduce to find the option values. */ extern "C" void _gpu_find_option_values_using_curand( result_set* r_set ) { #ifdef VERBOSE printf( "\nGPU COMPUTATION using curand_v1()\n=============================\n"); #endif // read the input file for options relating to the number of paths, number // of discrete time-steps etc. InputData h_indata; FileIO fileIO; fileIO.readInputFile((char*)"./input/options.txt", h_indata); float GPU_t = 0; // allocate memory to store all Monte Carlo paths, and intialize // the initial value of the asset at t=0. int num_paths = (h_indata.num_paths%2 == 0)?h_indata.num_paths:h_indata.num_paths+1; float *d_S = NULL; float *d_x = NULL; float *d_h = NULL; float *d_cash_flow = NULL; float *d_option_value = NULL; float *d_cash_flow_am = NULL; curandState *d_states = NULL; int *d_optimal_exercise_boundary = NULL; int width = num_paths; int height = h_indata.num_time_steps+1; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); cudaEvent_t startt, stopt; cudaEventCreate(&startt); cudaEventCreate(&stopt); cudaEventRecord(startt,0); h_indata.num_paths_per_thread = pow(2, ceil(log(h_indata.num_paths_per_thread)/log(2))); checkError(cudaMalloc((void**)&d_S, width*sizeof(float)*height)); checkError(cudaMalloc((void**)&d_x, width*sizeof(float))); checkError(cudaMalloc((void**)&d_h, width*sizeof(float))); checkError(cudaMalloc((void**)&d_cash_flow, width*sizeof(float))); checkError(cudaMalloc((void**)&d_option_value, width*sizeof(float))); checkError(cudaMalloc((void**)&d_cash_flow_am, width*sizeof(float))); checkError(cudaMalloc((void**)&d_states, ceil(1.0*width/h_indata.num_paths_per_thread)*sizeof(curandState))); checkError(cudaMalloc((void**)&d_optimal_exercise_boundary, width*sizeof(int))); cudaEventRecord(stopt,0); cudaEventSynchronize(stopt); cudaEventElapsedTime(&GPU_t, startt, stopt); #ifdef VERBOSE printf("\n### curand_v1(): Time to initial cudamalloc: %fs\n", GPU_t/1000); #endif int threadsPerBlock = threads_per_block; int blocksPerGrid = (int)ceil( 1.0*width/(threadsPerBlock*h_indata.num_paths_per_thread) ); #ifdef VERBOSE printf(" - Blocks per Grid = %d\n", blocksPerGrid); printf(" - Threads per Block = %d\n", threadsPerBlock); cudaPrintfInit(); #endif cudaEvent_t start2, stop2; cudaEventCreate(&start2); cudaEventCreate(&stop2); cudaEventRecord(start2,0); generate_states<<<blocksPerGrid,threadsPerBlock>>>(h_indata.random_seed, d_states); mp_generate_asset_price_paths_and_eu_cash_flow<<<blocksPerGrid,threadsPerBlock>>>(d_S, d_cash_flow, d_option_value, width, height, h_indata, d_states); cudaEventRecord(stop2,0); cudaEventSynchronize(stop2); cudaEventElapsedTime(&GPU_t, start2, stop2); #ifdef VERBOSE printf("\n### curand_v1(): Time to generate curandStates and asset price paths on GPU: %fs\n", GPU_t/1000); #endif thrust::device_ptr<float> dev_option_value_b(d_option_value); thrust::device_ptr<float> dev_option_value_e = dev_option_value_b + width; float sum = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_eu = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float 
european_option_value = sum/width; var_eu = (var_eu - pow(european_option_value, 2) )/width; cudaEvent_t start3, stop3; cudaEventCreate(&start3); cudaEventCreate(&stop3); cudaEventRecord(start3,0); find_optimal_exercise_boundary_and_am_cash_flow<<<blocksPerGrid*h_indata.num_paths_per_thread, threadsPerBlock>>>(d_S, d_cash_flow, d_option_value, width, height, h_indata, d_x, d_h, d_optimal_exercise_boundary, d_cash_flow_am); cudaEventRecord(stop3,0); cudaEventSynchronize(stop3); cudaEventElapsedTime(&GPU_t, start3, stop3); #ifdef VERBOSE printf("\n### curand_v1(): Time to generate optimal exercise boundary on GPU: %fs\n", GPU_t/1000); #endif float sum_a = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_am = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float american_option_value = sum_a/width; var_am = (var_am - pow(american_option_value, 2) )/width; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&GPU_t, start, stop); // show memory usage of GPU size_t free_byte ; size_t total_byte ; cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ); if ( cudaSuccess != cuda_status ) { printf("curand_v1() Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); } float delta_am = 1.96*sqrt(var_am/width)/american_option_value; float delta_eu = 1.96*sqrt(var_eu/width)/european_option_value; r_set->american_option_value = american_option_value; r_set->european_option_value = european_option_value; r_set->std_dev_am = sqrt(var_am); r_set->std_dev_eu = sqrt(var_eu); r_set->max_rel_error_am = 100*delta_am/(1-delta_am); r_set->max_rel_error_eu = 100*delta_eu/(1-delta_eu); r_set->net_clock_time = GPU_t/1000; r_set->memory_usage = (total_byte - free_byte)*9.53674e-7; #ifdef VERBOSE r_set->print_details( stdout ); cudaPrintfDisplay(stdout,true); cudaPrintfEnd(); #endif checkError(cudaFree(d_S)); checkError(cudaFree(d_x)); checkError(cudaFree(d_h)); checkError(cudaFree(d_cash_flow)); checkError(cudaFree(d_option_value)); checkError(cudaFree(d_cash_flow_am)); checkError(cudaFree(d_optimal_exercise_boundary)); checkError(cudaFree(d_states)); } /****************************************************************************/ /*! \brief Third wrapper function that calls the kernels. * * This function is the third wrapper to call the required kernel * functions. This function allocates all required memory on GPU, * generates normally distributed random samples (for use in GPU), * then calls a single kernel that uses the random samples to compute * asset price paths (one path per thread) and find the optimal exercise * boundary for the american option. Finally, * it uses thrust::reduce to find the option values. */ extern "C" void _gpu_find_option_values_using_normrand_v2( result_set* r_set ) { #ifdef VERBOSE printf( "\nGPU COMPUTATION using normrand_v2()\n=============================\n"); #endif // read the input file for options relating to the number of paths, number // of discrete time-steps etc. InputData h_indata; FileIO fileIO; fileIO.readInputFile((char*)"./input/options.txt", h_indata); float GPU_t = 0; // allocate memory to store all Monte Carlo paths, and initialize // the initial value of the asset at t=0.
int num_paths = (h_indata.num_paths%2 == 0)?h_indata.num_paths:h_indata.num_paths+1; float *d_option_value = NULL; float *d_option_value_am = NULL; int width = num_paths; int height = h_indata.num_time_steps+1; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); cudaEvent_t startt, stopt; cudaEventCreate(&startt); cudaEventCreate(&stopt); cudaEventRecord(startt,0); checkError(cudaMalloc((void**)&d_option_value, width*sizeof(float))); checkError(cudaMalloc((void**)&d_option_value_am, width*sizeof(float))); cudaEventRecord(stopt,0); cudaEventSynchronize(stopt); cudaEventElapsedTime(&GPU_t, startt, stopt); #ifdef VERBOSE printf("\n### normrand_v2(): Time to initial cudamalloc: %fs\n", GPU_t/1000); #endif int threadsPerBlock = threads_per_block; int blocksPerGrid = (int)ceil( 1.0*width/threadsPerBlock); cudaEvent_t start1, stop1; cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventRecord(start1,0); random_normal normrnd; normrnd.zigset( 78542121 ); size_t size_norm = width*height*sizeof(float); float *h_norm_sample = (float *) malloc(size_norm); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { h_norm_sample[i*height+j] = normrnd.RNOR(); } } /*for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { h_norm_sample[j*width + i] = normrnd.RNOR(); } }*/ cudaEventRecord(stop1,0); cudaEventSynchronize(stop1); cudaEventElapsedTime(&GPU_t, start1, stop1); #ifdef VERBOSE printf("\n### normrand_v2(): Time to generate normal samples on CPU: %fs\n", GPU_t/1000); printf(" - Blocks per Grid = %d\n", blocksPerGrid); printf(" - Threads per Block = %d\n", threadsPerBlock); printf(" - num-elemebts in d_norm_sample: %d\n", size_norm/4); cudaPrintfInit(); #endif float *d_norm_sample = NULL; checkError(cudaMalloc((void**)&d_norm_sample, size_norm)); checkError(cudaMemcpy(d_norm_sample, h_norm_sample, size_norm, cudaMemcpyHostToDevice)); cudaEvent_t start2, stop2; cudaEventCreate(&start2); cudaEventCreate(&stop2); cudaEventRecord(start2,0); find_cash_flows_and_option_values<<<blocksPerGrid,threadsPerBlock>>>(d_option_value, d_option_value_am, width, height, h_indata, d_norm_sample); cudaEventRecord(stop2,0); cudaEventSynchronize(stop2); cudaEventElapsedTime(&GPU_t, start2, stop2); #ifdef VERBOSE printf("\n### normrand_v2(): Time to generate price paths and option values on GPU: %fs\n", GPU_t/1000); #endif thrust::device_ptr<float> dev_option_value_b(d_option_value); thrust::device_ptr<float> dev_option_value_e = dev_option_value_b + width; float sum = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_eu = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float european_option_value = sum/width; var_eu = (var_eu - pow(european_option_value, 2) )/width; thrust::device_ptr<float> dev_option_value_am_b(d_option_value_am); thrust::device_ptr<float> dev_option_value_am_e = dev_option_value_am_b + width; float sum_a = thrust::reduce(dev_option_value_am_b, dev_option_value_am_e, (float)0, thrust::plus<float>()); float var_am = thrust::transform_reduce(dev_option_value_am_b, dev_option_value_am_e, square<float>(), (float)0, thrust::plus<float>()); float american_option_value = sum_a/width; var_am = (var_am - pow(american_option_value, 2) )/width; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&GPU_t, start, stop); // show memory usage of GPU size_t free_byte ; size_t total_byte ; cudaError_t 
cuda_status = cudaMemGetInfo( &free_byte, &total_byte ); if ( cudaSuccess != cuda_status ) { printf("normrand_v2() Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); //exit(1); } float delta_am = 1.96*sqrt(var_am/width)/american_option_value; float delta_eu = 1.96*sqrt(var_eu/width)/european_option_value; r_set->american_option_value = american_option_value; r_set->european_option_value = european_option_value; r_set->std_dev_am = sqrt(var_am); r_set->std_dev_eu = sqrt(var_eu); r_set->max_rel_error_am = 100*delta_am/(1-delta_am); r_set->max_rel_error_eu = 100*delta_eu/(1-delta_eu); r_set->net_clock_time = GPU_t/1000; r_set->memory_usage = (total_byte - free_byte)*9.53674e-7; #ifdef VERBOSE r_set->print_details( stdout ); cudaPrintfDisplay(stdout,true); cudaPrintfEnd(); #endif checkError(cudaFree(d_option_value)); checkError(cudaFree(d_option_value_am)); checkError(cudaFree(d_norm_sample)); } /****************************************************************************/ /*! \brief Fourth wrapper function that calls the kernels. * * This function is the fourth wrapper to call the required kernel * functions. This function allocates all required memory on GPU, * initializes the 'curandStates' (for use in GPU, one for each thread), * then calls the kernel that uses the curandStates to compute * asset price paths (multiple paths per thread) and also * find the optimal exercise boundary for american options. Finally, * it uses thrust::reduce to find the option values. */ extern "C" void _gpu_find_option_values_using_curand_v2( result_set* r_set ) { #ifdef VERBOSE printf( "\nGPU COMPUTATION using curand_v2()\n=============================\n"); #endif // read the input file for options relating to the number of paths, number // of discrete time-steps etc. InputData h_indata; FileIO fileIO; fileIO.readInputFile((char*)"./input/options.txt", h_indata); float GPU_t = 0; // allocate memory to store all Monte Carlo paths, and initialize // the initial value of the asset at t=0.
int num_paths = (h_indata.num_paths%2 == 0)?h_indata.num_paths:h_indata.num_paths+1; float *d_option_value = NULL; float *d_option_value_am = NULL; curandState *d_states = NULL; int width = num_paths; int height = h_indata.num_time_steps+1; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); cudaEvent_t startt, stopt; cudaEventCreate(&startt); cudaEventCreate(&stopt); cudaEventRecord(startt,0); h_indata.num_paths_per_thread = pow(2, ceil(log(h_indata.num_paths_per_thread)/log(2))); checkError(cudaMalloc((void**)&d_option_value, width*sizeof(float))); checkError(cudaMalloc((void**)&d_option_value_am, width*sizeof(float))); checkError(cudaMalloc((void**)&d_states, ceil(1.0*width/h_indata.num_paths_per_thread)*sizeof(curandState))); cudaEventRecord(stopt,0); cudaEventSynchronize(stopt); cudaEventElapsedTime(&GPU_t, startt, stopt); #ifdef VERBOSE printf("\n### curand_v2(): Time to initial cudamalloc: %fs\n", GPU_t/1000); #endif int threadsPerBlock = threads_per_block; int blocksPerGrid = (int)ceil( 1.0*width/(threadsPerBlock*h_indata.num_paths_per_thread) ); #ifdef VERBOSE printf(" - Blocks per Grid = %d\n", blocksPerGrid); printf(" - Threads per Block = %d\n", threadsPerBlock); cudaPrintfInit(); #endif cudaEvent_t start1, stop1; cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventRecord(start1,0); generate_states<<<blocksPerGrid,threadsPerBlock>>>(h_indata.random_seed, d_states); cudaEventRecord(stop1,0); cudaEventSynchronize(stop1); cudaEventElapsedTime(&GPU_t, start1, stop1); #ifdef VERBOSE printf("\n### curand_v2(): Time to initial crandStates on GPU: %fs\n", GPU_t/1000); #endif cudaEvent_t start2, stop2; cudaEventCreate(&start2); cudaEventCreate(&stop2); cudaEventRecord(start2,0); mp_find_cash_flows_and_option_value<<<blocksPerGrid,threadsPerBlock>>>(d_option_value, d_option_value_am, width, height, h_indata, d_states); cudaEventRecord(stop2,0); cudaEventSynchronize(stop2); cudaEventElapsedTime(&GPU_t, start2, stop2); #ifdef VERBOSE printf("\n### curand_v2(): Time to generate price paths and option values on GPU: %fs\n", GPU_t/1000); #endif thrust::device_ptr<float> dev_option_value_b(d_option_value); thrust::device_ptr<float> dev_option_value_e = dev_option_value_b + width; float sum = thrust::reduce(dev_option_value_b, dev_option_value_e, (float)0, thrust::plus<float>()); float var_eu = thrust::transform_reduce(dev_option_value_b, dev_option_value_e, square<float>(), (float)0, thrust::plus<float>()); float european_option_value = sum/width; var_eu = (var_eu - pow(european_option_value, 2) )/width; thrust::device_ptr<float> dev_option_value_am_b(d_option_value_am); thrust::device_ptr<float> dev_option_value_am_e = dev_option_value_am_b + width; float sum_a = thrust::reduce(dev_option_value_am_b, dev_option_value_am_e, (float)0, thrust::plus<float>()); float var_am = thrust::transform_reduce(dev_option_value_am_b, dev_option_value_am_e, square<float>(), (float)0, thrust::plus<float>()); float american_option_value = sum_a/width; var_am = (var_am - pow(american_option_value, 2) )/width; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&GPU_t, start, stop); // show memory usage of GPU size_t free_byte ; size_t total_byte ; cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ); if ( cudaSuccess != cuda_status ) { printf("curand_v2() error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); //exit(1); } float delta_am = 1.96*sqrt(var_am/width)/american_option_value; float delta_eu = 
1.96*sqrt(var_eu/width)/european_option_value; r_set->american_option_value = american_option_value; r_set->european_option_value = european_option_value; r_set->std_dev_am = sqrt(var_am); r_set->std_dev_eu = sqrt(var_eu); r_set->max_rel_error_am = 100*delta_am/(1-delta_am); r_set->max_rel_error_eu = 100*delta_eu/(1-delta_eu); r_set->net_clock_time = GPU_t/1000; r_set->memory_usage = (total_byte - free_byte)*9.53674e-7; #ifdef VERBOSE r_set->print_details( stdout ); cudaPrintfDisplay(stdout,true); cudaPrintfEnd(); #endif checkError(cudaFree(d_option_value)); checkError(cudaFree(d_option_value_am)); checkError(cudaFree(d_states)); }
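All four wrappers above finish the same way: a thrust::reduce over the per-path discounted payoffs gives the option value, a thrust::transform_reduce with the square functor gives the sum of squares, and a 95% confidence half-width (delta = 1.96*sqrt(var/N)/value) is derived from them. The stand-alone sketch below reproduces only that post-processing step with made-up payoff values; the functor and variable names are local to the sketch, and it uses the estimator var = sum_sq/N - mean*mean, whereas the wrappers compute (sum_sq - mean*mean)/N.

// Illustrative sketch (not part of the original source): reducing per-path
// discounted payoffs into an option value, standard deviation and 95% interval.
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <cmath>
#include <cstdio>

struct square_f { __host__ __device__ float operator()(float x) const { return x * x; } };

int main() {
    // Hypothetical discounted payoffs, one entry per Monte Carlo path.
    thrust::device_vector<float> payoff(4);
    payoff[0] = 3.1f; payoff[1] = 0.0f; payoff[2] = 5.4f; payoff[3] = 1.2f;
    int n = static_cast<int>(payoff.size());

    float sum    = thrust::reduce(payoff.begin(), payoff.end(), 0.0f, thrust::plus<float>());
    float sum_sq = thrust::transform_reduce(payoff.begin(), payoff.end(),
                                            square_f(), 0.0f, thrust::plus<float>());

    float mean  = sum / n;                        // Monte Carlo estimate of the option value
    float var   = sum_sq / n - mean * mean;       // variance of the per-path payoffs
    float delta = 1.96f * sqrtf(var / n) / mean;  // relative 95% confidence half-width

    printf("value = %f, std dev = %f, max relative error ~ %f%%\n",
           mean, sqrtf(var), 100.0f * delta / (1.0f - delta));
    return 0;
}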
270742a1c4c77666828666644f142742e5d53640.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include "dali/core/cuda_event.h" #include "dali/core/cuda_stream.h" #include "dali/test/dali_operator_test.h" #include "dali/test/tensor_test_utils.h" #include "dali/test/test_tensors.h" #include "dali/operators/generic/one_hot.cuh" namespace dali { template <typename OutputType, typename InputType, int batch_size_, int64_t num_classes_, int axis_, int64_t... Dims> struct OneHotTestParams { using Out = OutputType; using In = InputType; static constexpr int batch_size = batch_size_; static constexpr int64_t num_classes = num_classes_; static constexpr int axis = axis_; static constexpr int ndim = sizeof...(Dims); TensorShape<ndim> shape = {Dims...}; }; template <typename TestConfig> struct OneHotOpGpuPerfTest : public ::testing::Test { using Out = typename TestConfig::Out; using In = typename TestConfig::In; void SetUp() override { stream_ = HIPStreamMasqueradingAsCUDA::Create(true); auto &input_shape = config_.shape; sample_descs_tensor_.reshape(uniform_list_shape<1>(1, {config_.batch_size})); auto samples_cpu = sample_descs_tensor_.cpu()[0]; auto outer = input_shape.first(config_.axis); auto inner = input_shape.last(input_shape.size() - config_.axis); auto out_shape = shape_cat(shape_cat(outer, config_.num_classes), inner); auto input_list_shape = uniform_list_shape<TestConfig::ndim>(config_.batch_size, input_shape); auto out_list_shape = uniform_list_shape<TestConfig::ndim + 1>(config_.batch_size, out_shape); input_.reshape(input_list_shape); output_.reshape(out_list_shape); int num; auto seq_gen = [&num]() { return num = (num + 1) % TestConfig::num_classes; }; Fill(input_.cpu(), seq_gen); auto outer_vol = volume(input_shape.begin(), input_shape.begin() + config_.axis); auto inner_vol = volume(input_shape.begin() + config_.axis, input_shape.end()); auto inner_vol_classes = inner_vol * config_.num_classes; auto output_vol = outer_vol * inner_vol_classes; memset(samples_cpu.data, 0, TestConfig::batch_size * sizeof(detail::SampleDesc)); auto input_gpu = input_.gpu(stream_); auto output_gpu = output_.gpu(stream_); for (int sample_id = 0; sample_id < config_.batch_size; ++sample_id) { samples_cpu(sample_id)->inner_vol = inner_vol; samples_cpu(sample_id)->inner_vol_classes = inner_vol_classes; samples_cpu(sample_id)->output_vol = output_vol; samples_cpu(sample_id)->out = output_gpu[sample_id].data; samples_cpu(sample_id)->in = input_gpu[sample_id].data; } samples_gpu_ = sample_descs_tensor_.gpu(stream_)[0].data; } void MeasurePerf() { auto input_vol = volume(config_.shape); auto output_vol = input_vol * config_.num_classes; const int block = 256; auto grid = detail::gridHelper(output_vol, config_.batch_size, block); Out on_value = 1, off_value = 0; hipLaunchKernelGGL(( detail::PopulateOneHot<Out, In>), dim3(grid), dim3(block), 0, stream_, on_value, off_value, samples_gpu_); 
CUDAEvent start = CUDAEvent::CreateWithFlags(0); CUDAEvent end = CUDAEvent::CreateWithFlags(0); CUDA_CALL(hipEventRecord(start, stream_)); constexpr int kIters = 100; for (int i = 0; i < kIters; i++) { hipLaunchKernelGGL(( detail::PopulateOneHot<Out, In>), dim3(grid), dim3(block), 0, stream_, on_value, off_value, samples_gpu_); } CUDA_CALL(hipEventRecord(end, stream_)); CUDA_CALL(hipDeviceSynchronize()); float time; CUDA_CALL(hipEventElapsedTime(&time, start, end)); time *= (1e+6f / kIters); // convert to nanoseconds / 100 samples int64_t data_size = (input_vol * sizeof(In) + output_vol * sizeof(Out)) * config_.batch_size; std::cerr << "Throughput: " << data_size / time << " GB/s" << std::endl; } TestConfig config_{}; HIPStreamMasqueradingAsCUDA stream_; kernels::TestTensorList<In, TestConfig::ndim> input_; kernels::TestTensorList<Out, TestConfig::ndim + 1> output_; kernels::TestTensorList<typename detail::SampleDesc, 1> sample_descs_tensor_; const detail::SampleDesc *samples_gpu_; }; TYPED_TEST_SUITE_P(OneHotOpGpuPerfTest); TYPED_TEST_P(OneHotOpGpuPerfTest, Perf) { std::cerr << "batch_size: " << this->config_.batch_size << ", num_classes: " << this->config_.num_classes << ", sample_dim: " << this->config_.shape << std::endl; this->MeasurePerf(); } REGISTER_TYPED_TEST_SUITE_P(OneHotOpGpuPerfTest, Perf); using TestConfigs = ::testing::Types< OneHotTestParams<int, int, 1, 256, 0, 1024, 1024>, OneHotTestParams<int, int, 1, 256, 1, 1024, 1024>, OneHotTestParams<int, int, 1, 256, 2, 1024, 1024>, OneHotTestParams<int64_t, int, 1, 256, 0, 1024, 1024>, OneHotTestParams<int64_t, int, 1, 256, 1, 1024, 1024>, OneHotTestParams<int64_t, int, 1, 256, 2, 1024, 1024>, OneHotTestParams<int64_t, int64_t, 1, 256, 0, 1024, 1024>, OneHotTestParams<int64_t, int64_t, 1, 256, 1, 1024, 1024>, OneHotTestParams<int64_t, int64_t, 1, 256, 2, 1024, 1024>, OneHotTestParams<int8_t, int8_t, 1, 256, 0, 1024, 1024>, OneHotTestParams<int8_t, int8_t, 1, 256, 1, 1024, 1024>, OneHotTestParams<int8_t, int8_t, 1, 256, 2, 1024, 1024>, OneHotTestParams<int, int, 4, 8, 0, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 1, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 2, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 3, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 4, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 5, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 1024 * 256, 0, 1024>, OneHotTestParams<int, int, 4, 1024 * 256, 1, 1024>, OneHotTestParams<int, int, 16, 64, 0, 1024, 128>, OneHotTestParams<int, int, 16, 64, 1, 1024, 128>, OneHotTestParams<int, int, 16, 64, 2, 1024, 128> >; INSTANTIATE_TYPED_TEST_SUITE_P(OneHotOpGpu, OneHotOpGpuPerfTest, TestConfigs); } // namespace dali
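The SetUp() above fills one detail::SampleDesc per sample with inner_vol, inner_vol_classes = inner_vol * num_classes and output_vol, which is enough to reconstruct the layout PopulateOneHot writes: the class axis is inserted at position `axis`, so output element (outer, class, inner) is on_value exactly when input(outer, inner) equals class. The small host-side reference below spells out that mapping; it reflects my reading of the setup code rather than DALI's kernel, and every name in it is local to the sketch.

// Illustrative sketch (not from DALI): reference semantics of the one-hot layout
// implied by the SampleDesc fields prepared in SetUp().
#include <vector>
#include <cstdint>
#include <cassert>

template <typename Out, typename In>
void one_hot_reference(std::vector<Out>& out, const std::vector<In>& in,
                       int64_t outer_vol, int64_t inner_vol, int64_t num_classes,
                       Out on_value, Out off_value) {
  int64_t inner_vol_classes = inner_vol * num_classes;  // stride of one outer slice in the output
  out.assign(outer_vol * inner_vol_classes, off_value);
  for (int64_t o = 0; o < outer_vol; ++o)
    for (int64_t i = 0; i < inner_vol; ++i) {
      int64_t cls = in[o * inner_vol + i];
      assert(cls >= 0 && cls < num_classes);
      out[o * inner_vol_classes + cls * inner_vol + i] = on_value;
    }
}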
270742a1c4c77666828666644f142742e5d53640.cu
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include "dali/core/cuda_event.h" #include "dali/core/cuda_stream.h" #include "dali/test/dali_operator_test.h" #include "dali/test/tensor_test_utils.h" #include "dali/test/test_tensors.h" #include "dali/operators/generic/one_hot.cuh" namespace dali { template <typename OutputType, typename InputType, int batch_size_, int64_t num_classes_, int axis_, int64_t... Dims> struct OneHotTestParams { using Out = OutputType; using In = InputType; static constexpr int batch_size = batch_size_; static constexpr int64_t num_classes = num_classes_; static constexpr int axis = axis_; static constexpr int ndim = sizeof...(Dims); TensorShape<ndim> shape = {Dims...}; }; template <typename TestConfig> struct OneHotOpGpuPerfTest : public ::testing::Test { using Out = typename TestConfig::Out; using In = typename TestConfig::In; void SetUp() override { stream_ = CUDAStream::Create(true); auto &input_shape = config_.shape; sample_descs_tensor_.reshape(uniform_list_shape<1>(1, {config_.batch_size})); auto samples_cpu = sample_descs_tensor_.cpu()[0]; auto outer = input_shape.first(config_.axis); auto inner = input_shape.last(input_shape.size() - config_.axis); auto out_shape = shape_cat(shape_cat(outer, config_.num_classes), inner); auto input_list_shape = uniform_list_shape<TestConfig::ndim>(config_.batch_size, input_shape); auto out_list_shape = uniform_list_shape<TestConfig::ndim + 1>(config_.batch_size, out_shape); input_.reshape(input_list_shape); output_.reshape(out_list_shape); int num; auto seq_gen = [&num]() { return num = (num + 1) % TestConfig::num_classes; }; Fill(input_.cpu(), seq_gen); auto outer_vol = volume(input_shape.begin(), input_shape.begin() + config_.axis); auto inner_vol = volume(input_shape.begin() + config_.axis, input_shape.end()); auto inner_vol_classes = inner_vol * config_.num_classes; auto output_vol = outer_vol * inner_vol_classes; memset(samples_cpu.data, 0, TestConfig::batch_size * sizeof(detail::SampleDesc)); auto input_gpu = input_.gpu(stream_); auto output_gpu = output_.gpu(stream_); for (int sample_id = 0; sample_id < config_.batch_size; ++sample_id) { samples_cpu(sample_id)->inner_vol = inner_vol; samples_cpu(sample_id)->inner_vol_classes = inner_vol_classes; samples_cpu(sample_id)->output_vol = output_vol; samples_cpu(sample_id)->out = output_gpu[sample_id].data; samples_cpu(sample_id)->in = input_gpu[sample_id].data; } samples_gpu_ = sample_descs_tensor_.gpu(stream_)[0].data; } void MeasurePerf() { auto input_vol = volume(config_.shape); auto output_vol = input_vol * config_.num_classes; const int block = 256; auto grid = detail::gridHelper(output_vol, config_.batch_size, block); Out on_value = 1, off_value = 0; detail::PopulateOneHot<Out, In><<<grid, block, 0, stream_>>>(on_value, off_value, samples_gpu_); CUDAEvent start = CUDAEvent::CreateWithFlags(0); CUDAEvent end = CUDAEvent::CreateWithFlags(0); CUDA_CALL(cudaEventRecord(start, 
stream_)); constexpr int kIters = 100; for (int i = 0; i < kIters; i++) { detail::PopulateOneHot<Out, In><<<grid, block, 0, stream_>>>( on_value, off_value, samples_gpu_); } CUDA_CALL(cudaEventRecord(end, stream_)); CUDA_CALL(cudaDeviceSynchronize()); float time; CUDA_CALL(cudaEventElapsedTime(&time, start, end)); time *= (1e+6f / kIters); // convert to nanoseconds / 100 samples int64_t data_size = (input_vol * sizeof(In) + output_vol * sizeof(Out)) * config_.batch_size; std::cerr << "Throughput: " << data_size / time << " GB/s" << std::endl; } TestConfig config_{}; CUDAStream stream_; kernels::TestTensorList<In, TestConfig::ndim> input_; kernels::TestTensorList<Out, TestConfig::ndim + 1> output_; kernels::TestTensorList<typename detail::SampleDesc, 1> sample_descs_tensor_; const detail::SampleDesc *samples_gpu_; }; TYPED_TEST_SUITE_P(OneHotOpGpuPerfTest); TYPED_TEST_P(OneHotOpGpuPerfTest, Perf) { std::cerr << "batch_size: " << this->config_.batch_size << ", num_classes: " << this->config_.num_classes << ", sample_dim: " << this->config_.shape << std::endl; this->MeasurePerf(); } REGISTER_TYPED_TEST_SUITE_P(OneHotOpGpuPerfTest, Perf); using TestConfigs = ::testing::Types< OneHotTestParams<int, int, 1, 256, 0, 1024, 1024>, OneHotTestParams<int, int, 1, 256, 1, 1024, 1024>, OneHotTestParams<int, int, 1, 256, 2, 1024, 1024>, OneHotTestParams<int64_t, int, 1, 256, 0, 1024, 1024>, OneHotTestParams<int64_t, int, 1, 256, 1, 1024, 1024>, OneHotTestParams<int64_t, int, 1, 256, 2, 1024, 1024>, OneHotTestParams<int64_t, int64_t, 1, 256, 0, 1024, 1024>, OneHotTestParams<int64_t, int64_t, 1, 256, 1, 1024, 1024>, OneHotTestParams<int64_t, int64_t, 1, 256, 2, 1024, 1024>, OneHotTestParams<int8_t, int8_t, 1, 256, 0, 1024, 1024>, OneHotTestParams<int8_t, int8_t, 1, 256, 1, 1024, 1024>, OneHotTestParams<int8_t, int8_t, 1, 256, 2, 1024, 1024>, OneHotTestParams<int, int, 4, 8, 0, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 1, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 2, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 3, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 4, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 8, 5, 32, 32, 32, 32, 32>, OneHotTestParams<int, int, 4, 1024 * 256, 0, 1024>, OneHotTestParams<int, int, 4, 1024 * 256, 1, 1024>, OneHotTestParams<int, int, 16, 64, 0, 1024, 128>, OneHotTestParams<int, int, 16, 64, 1, 1024, 128>, OneHotTestParams<int, int, 16, 64, 2, 1024, 128> >; INSTANTIATE_TYPED_TEST_SUITE_P(OneHotOpGpu, OneHotOpGpuPerfTest, TestConfigs); } // namespace dali
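MeasurePerf() in both copies above does one warm-up launch, brackets kIters launches with two CUDA events, rescales the elapsed milliseconds to nanoseconds per launch, and reports bytes moved per nanosecond as GB/s. The self-contained sketch below shows the same event-timing pattern with the plain runtime API; the kernel, sizes and names are invented for the example.

// Illustrative sketch (not from DALI): CUDA-event timing of repeated kernel launches
// and conversion of the elapsed time into an effective bandwidth figure.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_copy(const int* in, int* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i];
}

int main() {
  const int n = 1 << 20;
  int *in = nullptr, *out = nullptr;
  cudaMalloc(&in, n * sizeof(int));
  cudaMalloc(&out, n * sizeof(int));

  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);

  dummy_copy<<<(n + 255) / 256, 256>>>(in, out, n);  // warm-up launch, excluded from timing

  const int kIters = 100;
  cudaEventRecord(start);
  for (int i = 0; i < kIters; ++i)
    dummy_copy<<<(n + 255) / 256, 256>>>(in, out, n);
  cudaEventRecord(end);
  cudaEventSynchronize(end);

  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, end);   // milliseconds for all kIters launches
  double bytes = 2.0 * n * sizeof(int);    // one read plus one write per element per launch
  printf("%.2f GB/s\n", bytes * kIters / (ms * 1e-3) / 1e9);

  cudaFree(in); cudaFree(out);
  cudaEventDestroy(start); cudaEventDestroy(end);
  return 0;
}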
34d5adc4d276a79adcafc2b9f894cdcf61517a6a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" namespace cv { namespace cuda { namespace device { // // namespace imgproc // { __device__ void histogramAddAndSub8(int* H, const int * hist_colAdd,const int * hist_colSub){ int tx = threadIdx.x; if (tx<8){ H[tx]+=hist_colAdd[tx]-hist_colSub[tx]; } } __device__ void histogramMultipleAdd8(int* H, const int * hist_col,int histCount){ int tx = threadIdx.x; if (tx<8){ int temp=H[tx]; for(int i=0; i<histCount; i++) temp+=hist_col[(i<<3)+tx]; H[tx]=temp; } } __device__ void histogramClear8(int* H){ int tx = threadIdx.x; if (tx<8){ H[tx]=0; } } __device__ void histogramAdd8(int* H, const int * hist_col){ int tx = threadIdx.x; if (tx<8){ H[tx]+=hist_col[tx]; } } __device__ void histogramSub8(int* H, const int * hist_col){ int tx = threadIdx.x; if (tx<8){ H[tx]-=hist_col[tx]; } } __device__ void histogramAdd32(int* H, const int * hist_col){ int tx = threadIdx.x; if (tx<32){ H[tx]+=hist_col[tx]; } } __device__ void histogramAddAndSub32(int* H, const int * hist_colAdd,const int * hist_colSub){ int tx = threadIdx.x; if (tx<32){ H[tx]+=hist_colAdd[tx]-hist_colSub[tx]; } } __device__ void histogramClear32(int* H){ int tx = threadIdx.x; if (tx<32){ H[tx]=0; } } __device__ void lucClear8(int* luc){ int tx = threadIdx.x; if (tx<8) luc[tx]=0; } __device__ void histogramMedianPar8LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){ int tx=threadIdx.x; *retval=*countAtMed=0; if(tx<8){ Hscan[tx]=H[tx]; } __syncthreads(); if(tx<8){ if(tx>=1 ) Hscan[tx]+=Hscan[tx-1]; if(tx>=2) Hscan[tx]+=Hscan[tx-2]; if(tx>=4) Hscan[tx]+=Hscan[tx-4]; } __syncthreads(); if(tx<7){ if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){ *retval=tx+1; *countAtMed=Hscan[tx]; } else if(Hscan[tx]==medPos){ if(Hscan[tx+1]>medPos){ *retval=tx+1; *countAtMed=Hscan[tx]; } } } } __device__ void histogramMedianPar32LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){ int tx=threadIdx.x; *retval=*countAtMed=0; if(tx<32){ Hscan[tx]=H[tx]; } __syncthreads(); if(tx<32){ if(tx>=1) Hscan[tx]+=Hscan[tx-1]; if(tx>=2) Hscan[tx]+=Hscan[tx-2]; if(tx>=4) Hscan[tx]+=Hscan[tx-4]; if(tx>=8) Hscan[tx]+=Hscan[tx-8]; if(tx>=16) Hscan[tx]+=Hscan[tx-16]; } __syncthreads(); if(tx<31){ if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){ *retval=tx+1; *countAtMed=Hscan[tx]; } else if(Hscan[tx]==medPos){ if(Hscan[tx+1]>medPos){ *retval=tx+1; *countAtMed=Hscan[tx]; } } } } __global__ void cuMedianFilterMultiBlock(PtrStepSzb src, PtrStepSzb dest, PtrStepSzi histPar, PtrStepSzi coarseHistGrid,int r, int medPos_) { __shared__ int HCoarse[8]; __shared__ int HCoarseScan[32]; __shared__ int HFine[8][32]; __shared__ int luc[8]; __shared__ int firstBin,countAtMed, retval; int rows = src.rows, cols=src.cols; int extraRowThread=rows%gridDim.x; int doExtraRow=blockIdx.x<extraRowThread; int startRow=0, stopRow=0; int rowsPerBlock= rows/gridDim.x+doExtraRow; // The following code partitions the work to the blocks. Some blocks will do one row more // than other blocks. 
This code is responsible for doing that balancing if(doExtraRow){ startRow=rowsPerBlock*blockIdx.x; stopRow=::min(rows, startRow+rowsPerBlock); } else{ startRow=(rowsPerBlock+1)*extraRowThread+(rowsPerBlock)*(blockIdx.x-extraRowThread); stopRow=::min(rows, startRow+rowsPerBlock); } int* hist= histPar.data+cols*256*blockIdx.x; int* histCoarse=coarseHistGrid.data +cols*8*blockIdx.x; if (blockIdx.x==(gridDim.x-1)) stopRow=rows; __syncthreads(); int initNeeded=0, initVal, initStartRow, initStopRow; if(blockIdx.x==0){ initNeeded=1; initVal=r+2; initStartRow=1; initStopRow=r; } else if (startRow<(r+2)){ initNeeded=1; initVal=r+2-startRow; initStartRow=1; initStopRow=r+startRow; } else{ initNeeded=0; initVal=0; initStartRow=startRow-(r+1); initStopRow=r+startRow; } __syncthreads(); // In the original algorithm an initialization phase was required as part of the window was outside the // image. In this parallel version, the initializtion is required for all thread blocks that part // of the median filter is outside the window. // For all threads in the block the same code will be executed. if (initNeeded){ for (int j=threadIdx.x; j<(cols); j+=blockDim.x){ hist[j*256+src.ptr(0)[j]]=initVal; histCoarse[j*8+(src.ptr(0)[j]>>5)]=initVal; } } __syncthreads(); // Fot all remaining rows in the median filter, add the values to the the histogram for (int j=threadIdx.x; j<cols; j+=blockDim.x){ for(int i=initStartRow; i<initStopRow; i++){ int pos=::min(i,rows-1); hist[j*256+src.ptr(pos)[j]]++; histCoarse[j*8+(src.ptr(pos)[j]>>5)]++; } } __syncthreads(); // Going through all the rows that the block is responsible for. int inc=blockDim.x*256; int incCoarse=blockDim.x*8; for(int i=startRow; i< stopRow; i++){ // For every new row that is started the global histogram for the entire window is restarted. histogramClear8(HCoarse); lucClear8(luc); // Computing some necessary indices int possub=::max(0,i-r-1),posadd=::min(rows-1,i+r); int histPos=threadIdx.x*256; int histCoarsePos=threadIdx.x*8; // Going through all the elements of a specific row. Foeach histogram, a value is taken out and // one value is added. 
for (int j=threadIdx.x; j<cols; j+=blockDim.x){ hist[histPos+ src.ptr(possub)[j] ]--; hist[histPos+ src.ptr(posadd)[j] ]++; histCoarse[histCoarsePos+ (src.ptr(possub)[j]>>5) ]--; histCoarse[histCoarsePos+ (src.ptr(posadd)[j]>>5) ]++; histPos+=inc; histCoarsePos+=incCoarse; } __syncthreads(); histogramMultipleAdd8(HCoarse,histCoarse, 2*r+1); // __syncthreads(); int cols_m_1=cols-1; for(int j=r;j<cols-r;j++){ int possub=::max(j-r,0); int posadd=::min(j+1+r,cols_m_1); int medPos=medPos_; __syncthreads(); histogramMedianPar8LookupOnly(HCoarse,HCoarseScan,medPos, &firstBin,&countAtMed); __syncthreads(); if ( luc[firstBin] <= (j-r)) { histogramClear32(HFine[firstBin]); for ( luc[firstBin] = j-r; luc[firstBin] < ::min(j+r+1,cols); luc[firstBin]++ ){ histogramAdd32(HFine[firstBin], hist+(luc[firstBin]*256+(firstBin<<5) ) ); } } else{ for ( ; luc[firstBin] < (j+r+1);luc[firstBin]++ ) { histogramAddAndSub32(HFine[firstBin], hist+(::min(luc[firstBin],cols_m_1)*256+(firstBin<<5) ), hist+(::max(luc[firstBin]-2*r-1,0)*256+(firstBin<<5) ) ); __syncthreads(); } } __syncthreads(); int leftOver=medPos-countAtMed; if(leftOver>=0){ histogramMedianPar32LookupOnly(HFine[firstBin],HCoarseScan,leftOver,&retval,&countAtMed); } else retval=0; __syncthreads(); if (threadIdx.x==0){ dest.ptr(i)[j]=(firstBin<<5) + retval; } histogramAddAndSub8(HCoarse, histCoarse+(int)(posadd<<3),histCoarse+(int)(possub<<3)); __syncthreads(); } __syncthreads(); } } void medianFiltering_gpu(const PtrStepSzb src, PtrStepSzb dst, PtrStepSzi devHist, PtrStepSzi devCoarseHist,int kernel, int partitions,hipStream_t stream){ int medPos=2*kernel*kernel+2*kernel; dim3 gridDim; gridDim.x=partitions; dim3 blockDim; blockDim.x=32; hipLaunchKernelGGL(( cuMedianFilterMultiBlock), dim3(gridDim),dim3(blockDim),0, stream, src, dst, devHist,devCoarseHist, kernel, medPos); if (!stream) cudaSafeCall( hipDeviceSynchronize() ); } }}} #endif
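cuMedianFilterMultiBlock keeps, for every column, a 256-bin histogram plus an 8-bin coarse histogram (each coarse bin covering 32 gray levels) of the pixels currently inside the window; histogramMedianPar8LookupOnly and histogramMedianPar32LookupOnly then locate the median with a prefix scan over the coarse bins followed by a scan inside the selected bin. The sequential sketch below shows that two-level lookup for a single histogram; it is a reading of the algorithm, not code taken from OpenCV, and the device functions distribute the same scans across threads.

// Illustrative sketch (not from OpenCV): two-level median lookup in a 256-bin
// histogram, written sequentially. medPos is the 0-based rank of the median,
// e.g. 2*r*r + 2*r for a (2r+1)x(2r+1) window; the total count in hist is
// assumed to be greater than medPos.
inline int median_from_histogram(const int hist[256], int medPos) {
  // Coarse pass: 8 bins of 32 gray levels each.
  int coarse[8] = {0};
  for (int b = 0; b < 256; ++b) coarse[b >> 5] += hist[b];

  int firstBin = 0, countBelow = 0;
  while (firstBin < 7 && countBelow + coarse[firstBin] <= medPos)
    countBelow += coarse[firstBin++];

  // Fine pass: walk the 32 gray levels of the selected coarse bin.
  int level = firstBin << 5;
  int last  = level + 31;
  while (level < last && countBelow + hist[level] <= medPos)
    countBelow += hist[level++];

  return level;  // gray value whose cumulative count first exceeds medPos
}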
34d5adc4d276a79adcafc2b9f894cdcf61517a6a.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" namespace cv { namespace cuda { namespace device { // // namespace imgproc // { __device__ void histogramAddAndSub8(int* H, const int * hist_colAdd,const int * hist_colSub){ int tx = threadIdx.x; if (tx<8){ H[tx]+=hist_colAdd[tx]-hist_colSub[tx]; } } __device__ void histogramMultipleAdd8(int* H, const int * hist_col,int histCount){ int tx = threadIdx.x; if (tx<8){ int temp=H[tx]; for(int i=0; i<histCount; i++) temp+=hist_col[(i<<3)+tx]; H[tx]=temp; } } __device__ void histogramClear8(int* H){ int tx = threadIdx.x; if (tx<8){ H[tx]=0; } } __device__ void histogramAdd8(int* H, const int * hist_col){ int tx = threadIdx.x; if (tx<8){ H[tx]+=hist_col[tx]; } } __device__ void histogramSub8(int* H, const int * hist_col){ int tx = threadIdx.x; if (tx<8){ H[tx]-=hist_col[tx]; } } __device__ void histogramAdd32(int* H, const int * hist_col){ int tx = threadIdx.x; if (tx<32){ H[tx]+=hist_col[tx]; } } __device__ void histogramAddAndSub32(int* H, const int * hist_colAdd,const int * hist_colSub){ int tx = threadIdx.x; if (tx<32){ H[tx]+=hist_colAdd[tx]-hist_colSub[tx]; } } __device__ void histogramClear32(int* H){ int tx = threadIdx.x; if (tx<32){ H[tx]=0; } } __device__ void lucClear8(int* luc){ int tx = threadIdx.x; if (tx<8) luc[tx]=0; } __device__ void histogramMedianPar8LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){ int tx=threadIdx.x; *retval=*countAtMed=0; if(tx<8){ Hscan[tx]=H[tx]; } __syncthreads(); if(tx<8){ if(tx>=1 ) Hscan[tx]+=Hscan[tx-1]; if(tx>=2) Hscan[tx]+=Hscan[tx-2]; if(tx>=4) Hscan[tx]+=Hscan[tx-4]; } __syncthreads(); if(tx<7){ if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){ *retval=tx+1; *countAtMed=Hscan[tx]; } else if(Hscan[tx]==medPos){ if(Hscan[tx+1]>medPos){ *retval=tx+1; *countAtMed=Hscan[tx]; } } } } __device__ void histogramMedianPar32LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){ int tx=threadIdx.x; *retval=*countAtMed=0; if(tx<32){ Hscan[tx]=H[tx]; } __syncthreads(); if(tx<32){ if(tx>=1) Hscan[tx]+=Hscan[tx-1]; if(tx>=2) Hscan[tx]+=Hscan[tx-2]; if(tx>=4) Hscan[tx]+=Hscan[tx-4]; if(tx>=8) Hscan[tx]+=Hscan[tx-8]; if(tx>=16) Hscan[tx]+=Hscan[tx-16]; } __syncthreads(); if(tx<31){ if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){ *retval=tx+1; *countAtMed=Hscan[tx]; } else if(Hscan[tx]==medPos){ if(Hscan[tx+1]>medPos){ *retval=tx+1; *countAtMed=Hscan[tx]; } } } } __global__ void cuMedianFilterMultiBlock(PtrStepSzb src, PtrStepSzb dest, PtrStepSzi histPar, PtrStepSzi coarseHistGrid,int r, int medPos_) { __shared__ int HCoarse[8]; __shared__ int HCoarseScan[32]; __shared__ int HFine[8][32]; __shared__ int luc[8]; __shared__ int firstBin,countAtMed, retval; int rows = src.rows, cols=src.cols; int extraRowThread=rows%gridDim.x; int doExtraRow=blockIdx.x<extraRowThread; int startRow=0, stopRow=0; int rowsPerBlock= rows/gridDim.x+doExtraRow; // The following code partitions the work to the blocks. Some blocks will do one row more // than other blocks. 
This code is responsible for doing that balancing if(doExtraRow){ startRow=rowsPerBlock*blockIdx.x; stopRow=::min(rows, startRow+rowsPerBlock); } else{ startRow=(rowsPerBlock+1)*extraRowThread+(rowsPerBlock)*(blockIdx.x-extraRowThread); stopRow=::min(rows, startRow+rowsPerBlock); } int* hist= histPar.data+cols*256*blockIdx.x; int* histCoarse=coarseHistGrid.data +cols*8*blockIdx.x; if (blockIdx.x==(gridDim.x-1)) stopRow=rows; __syncthreads(); int initNeeded=0, initVal, initStartRow, initStopRow; if(blockIdx.x==0){ initNeeded=1; initVal=r+2; initStartRow=1; initStopRow=r; } else if (startRow<(r+2)){ initNeeded=1; initVal=r+2-startRow; initStartRow=1; initStopRow=r+startRow; } else{ initNeeded=0; initVal=0; initStartRow=startRow-(r+1); initStopRow=r+startRow; } __syncthreads(); // In the original algorithm an initialization phase was required as part of the window was outside the // image. In this parallel version, the initializtion is required for all thread blocks that part // of the median filter is outside the window. // For all threads in the block the same code will be executed. if (initNeeded){ for (int j=threadIdx.x; j<(cols); j+=blockDim.x){ hist[j*256+src.ptr(0)[j]]=initVal; histCoarse[j*8+(src.ptr(0)[j]>>5)]=initVal; } } __syncthreads(); // Fot all remaining rows in the median filter, add the values to the the histogram for (int j=threadIdx.x; j<cols; j+=blockDim.x){ for(int i=initStartRow; i<initStopRow; i++){ int pos=::min(i,rows-1); hist[j*256+src.ptr(pos)[j]]++; histCoarse[j*8+(src.ptr(pos)[j]>>5)]++; } } __syncthreads(); // Going through all the rows that the block is responsible for. int inc=blockDim.x*256; int incCoarse=blockDim.x*8; for(int i=startRow; i< stopRow; i++){ // For every new row that is started the global histogram for the entire window is restarted. histogramClear8(HCoarse); lucClear8(luc); // Computing some necessary indices int possub=::max(0,i-r-1),posadd=::min(rows-1,i+r); int histPos=threadIdx.x*256; int histCoarsePos=threadIdx.x*8; // Going through all the elements of a specific row. Foeach histogram, a value is taken out and // one value is added. 
for (int j=threadIdx.x; j<cols; j+=blockDim.x){ hist[histPos+ src.ptr(possub)[j] ]--; hist[histPos+ src.ptr(posadd)[j] ]++; histCoarse[histCoarsePos+ (src.ptr(possub)[j]>>5) ]--; histCoarse[histCoarsePos+ (src.ptr(posadd)[j]>>5) ]++; histPos+=inc; histCoarsePos+=incCoarse; } __syncthreads(); histogramMultipleAdd8(HCoarse,histCoarse, 2*r+1); // __syncthreads(); int cols_m_1=cols-1; for(int j=r;j<cols-r;j++){ int possub=::max(j-r,0); int posadd=::min(j+1+r,cols_m_1); int medPos=medPos_; __syncthreads(); histogramMedianPar8LookupOnly(HCoarse,HCoarseScan,medPos, &firstBin,&countAtMed); __syncthreads(); if ( luc[firstBin] <= (j-r)) { histogramClear32(HFine[firstBin]); for ( luc[firstBin] = j-r; luc[firstBin] < ::min(j+r+1,cols); luc[firstBin]++ ){ histogramAdd32(HFine[firstBin], hist+(luc[firstBin]*256+(firstBin<<5) ) ); } } else{ for ( ; luc[firstBin] < (j+r+1);luc[firstBin]++ ) { histogramAddAndSub32(HFine[firstBin], hist+(::min(luc[firstBin],cols_m_1)*256+(firstBin<<5) ), hist+(::max(luc[firstBin]-2*r-1,0)*256+(firstBin<<5) ) ); __syncthreads(); } } __syncthreads(); int leftOver=medPos-countAtMed; if(leftOver>=0){ histogramMedianPar32LookupOnly(HFine[firstBin],HCoarseScan,leftOver,&retval,&countAtMed); } else retval=0; __syncthreads(); if (threadIdx.x==0){ dest.ptr(i)[j]=(firstBin<<5) + retval; } histogramAddAndSub8(HCoarse, histCoarse+(int)(posadd<<3),histCoarse+(int)(possub<<3)); __syncthreads(); } __syncthreads(); } } void medianFiltering_gpu(const PtrStepSzb src, PtrStepSzb dst, PtrStepSzi devHist, PtrStepSzi devCoarseHist,int kernel, int partitions,cudaStream_t stream){ int medPos=2*kernel*kernel+2*kernel; dim3 gridDim; gridDim.x=partitions; dim3 blockDim; blockDim.x=32; cuMedianFilterMultiBlock<<<gridDim,blockDim,0, stream>>>(src, dst, devHist,devCoarseHist, kernel, medPos); if (!stream) cudaSafeCall( cudaDeviceSynchronize() ); } }}} #endif
dcd678d13fcddfb1c6ba2815f1f6ee29056848ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <PointersManager.h> #include <ConstantTadHelper.h> namespace nd4j { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void scatterUpdateCuda(const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; for (int e = 0; e < numOfInd; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX); const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T> __host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { hipLaunchKernelGGL(( scatterUpdateCuda<T>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfInd, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(nd4j::LaunchContext* context, NDArray& input, NDArray& updates, const std::vector<int>* intArgs) { const int opCode = (*intArgs)[0]; const int numOfDims = (*intArgs)[1]; const int numOfInd = (*intArgs)[2 + numOfDims]; std::vector<int> tadDimensions(numOfDims); for (int e = 2; e < 2 + numOfDims; e++) tadDimensions[e-2] = (*intArgs)[e]; auto packX = 
ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), tadDimensions); auto packY = ConstantTadHelper::getInstance()->tadForDimensions(updates.getShapeInfo(), tadDimensions); NDArray indices(const_cast<int*>(intArgs->data()) + numOfDims + 3, 'c', {numOfInd}, nd4j::DataType::INT32, context); PointersManager manager(context, "scatterUpdate"); NDArray::prepareSpecialUse({&input}, {&input, &updates, &indices}); BUILD_SINGLE_SELECTOR(input.dataType(), scatterUpdateCudaLauncher, (context->getCudaStream(), opCode, numOfInd, input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), updates.specialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), reinterpret_cast<int*>(indices.getSpecialBuffer())), LIBND4J_TYPES); NDArray::registerSpecialUse({&input}, {&input, &updates, &indices}); manager.synchronize(); } } } }
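scatterUpdateCuda picks, for each entry of `indexes`, one tensor-along-dimension (TAD) of the input and combines it element-wise with the matching TAD of `updates`; opCode selects the operation, and TAD pairs with mismatched lengths are skipped. The host-side sketch below spells out the semantics of that opCode switch on flat arrays standing in for a single TAD pair; it is illustrative only and not part of libnd4j.

// Illustrative sketch (not from libnd4j): per-element semantics of the opCode
// switch in scatterUpdateCuda, applied to one (input TAD, updates TAD) pair.
#include <vector>
#include <cstddef>

template <typename T>
void scatter_update_reference(std::vector<T>& x, const std::vector<T>& y, int opCode) {
  if (x.size() != y.size()) return;        // mirrors the arrLenX != arrLenY early-out
  for (std::size_t i = 0; i < x.size(); ++i) {
    switch (opCode) {
      case 0: x[i] += y[i]; break;         // add
      case 1: x[i] -= y[i]; break;         // subtract
      case 2: x[i] *= y[i]; break;         // multiply
      case 3: x[i] /= y[i]; break;         // divide
      case 4: x[i] = y[i] - x[i]; break;   // reverse subtract
      case 5: x[i] = y[i] / x[i]; break;   // reverse divide
      case 6: x[i] = y[i]; break;          // copy / assign
      default: break;                      // unknown op: leave element unchanged
    }
  }
}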
dcd678d13fcddfb1c6ba2815f1f6ee29056848ab.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <PointersManager.h> #include <ConstantTadHelper.h> namespace nd4j { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void scatterUpdateCuda(const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; for (int e = 0; e < numOfInd; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX); const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T> __host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { scatterUpdateCuda<T><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfInd, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(nd4j::LaunchContext* context, NDArray& input, NDArray& updates, const std::vector<int>* intArgs) { const int opCode = (*intArgs)[0]; const int numOfDims = (*intArgs)[1]; const int numOfInd = (*intArgs)[2 + numOfDims]; std::vector<int> tadDimensions(numOfDims); for (int e = 2; e < 2 + numOfDims; e++) tadDimensions[e-2] = (*intArgs)[e]; auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), tadDimensions); auto packY = 
ConstantTadHelper::getInstance()->tadForDimensions(updates.getShapeInfo(), tadDimensions);

    NDArray indices(const_cast<int*>(intArgs->data()) + numOfDims + 3, 'c', {numOfInd}, nd4j::DataType::INT32, context);

    PointersManager manager(context, "scatterUpdate");

    NDArray::prepareSpecialUse({&input}, {&input, &updates, &indices});
    BUILD_SINGLE_SELECTOR(input.dataType(), scatterUpdateCudaLauncher, (context->getCudaStream(), opCode, numOfInd, input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), updates.specialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), reinterpret_cast<int*>(indices.getSpecialBuffer())), LIBND4J_TYPES);
    NDArray::registerSpecialUse({&input}, {&input, &updates, &indices});

    manager.synchronize();
}

}
}
}
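// Minimal standalone sketch of the update pattern implemented by scatterUpdateCuda above
// (hypothetical kernel, not part of libnd4j): each block claims the target rows whose
// index maps to it and applies the op selected by opCode elementwise. Shown here for
// contiguous float rows of equal length and a small subset of the op codes.
__global__ void simpleScatterUpdate(float* x, int rowLen,
                                    const float* updates,
                                    const int* indexes, int numOfInd,
                                    int opCode) {
    for (int e = 0; e < numOfInd; ++e) {
        const int row = indexes[e];
        if (blockIdx.x != row % gridDim.x)      // ownership rule equivalent to the one above
            continue;
        float*       dst = x + row * rowLen;
        const float* src = updates + e * rowLen;
        for (int i = threadIdx.x; i < rowLen; i += blockDim.x) {
            switch (opCode) {
                case 0: dst[i] += src[i]; break;   // add
                case 1: dst[i] -= src[i]; break;   // subtract
                case 6: dst[i]  = src[i]; break;   // assign
                default: break;
            }
        }
    }
}
// e.g. simpleScatterUpdate<<<128, 256, 0, stream>>>(dX, rowLen, dUpdates, dIndexes, numOfInd, 0);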
dce9aa78132a38baccbaf9e4744aa60e10ac07fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "src/cuda/conv_bias/matmul/inplace_matmul_impl.cuh" #include "src/cuda/kernel_common/diagnostic_prologue.cuh" #include "src/cuda/utils.cuh" using namespace megdnn; using namespace cuda; namespace { struct BufferFetcherTexture { hipTextureObject_t tex; __device__ __forceinline__ float get(uint32_t offset) { return tex1Dfetch<float>(tex, offset); } }; struct BufferFetcherRaw { const float* ptr; __device__ __forceinline__ float get(uint32_t offset) { return ptr[offset]; } }; struct BufferFetcherTextureHost { bool init_succ; BufferFetcherTexture val; BufferFetcherTextureHost(float* p, const size_t n); ~BufferFetcherTextureHost() { reset(); } void reset() { if (init_succ) { cuda_check(hipDestroyTextureObject(val.tex)); init_succ = false; } } }; BufferFetcherTextureHost::BufferFetcherTextureHost(float* p, const size_t n) { init_succ = false; hipTextureObject_t tex_obj; hipResourceDesc res_desc; memset(&res_desc, 0, sizeof(hipResourceDesc)); res_desc.resType = hipResourceTypeLinear; res_desc.res.linear.devPtr = static_cast<void*>(p); res_desc.res.linear.sizeInBytes = n * sizeof(float); res_desc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipTextureDesc tex_desc; memset(&tex_desc, 0, sizeof(hipTextureDesc)); if (hipCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL) == hipSuccess) { val.tex = tex_obj; init_succ = true; } else { hipGetLastError(); // reset error } } template <class BufferFetcher> struct KernelPtr { typedef void (*type)( BufferFetcher, BufferFetcher, float*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); }; //! 1 -> 0xffffffff, 0 -> 0x00000000 __device__ __forceinline__ uint32_t bool_as_mask(uint32_t cond) { return (!cond) - 1u; } union FloatAndU32 { float f; uint32_t u; }; //! \p mask must be either all 1 or 0 bits template <class BufferFetcher> __device__ __forceinline__ float visit_with_mask( BufferFetcher buf, uint32_t offset, uint32_t mask) { FloatAndU32 f; f.f = buf.get(offset & mask); f.u &= mask; return f.f; } template <uint32_t BY, uint32_t BX, bool is_xcorr, class BufferFetcher> __global__ void conv_kernel( BufferFetcher src, BufferFetcher filter, float* dst, const uint32_t INP_BS, const uint32_t OUT_BS, const uint32_t IC, const uint32_t IH, const uint32_t IW, const uint32_t OC, const uint32_t OH, const uint32_t OW, const uint32_t FH, const uint32_t FW, const uint32_t SH, const uint32_t SW, const uint32_t PH, const uint32_t PW) { const uint32_t BM = BY < BX ? 
BY : BX; // BY*BX == 256 // (OC) * (IC*FH*FW) * (OH*OW) const uint32_t n = blockIdx.z; const uint32_t tidx = threadIdx.x; const uint32_t tidy = threadIdx.y; const uint32_t posx = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t posy = blockIdx.y * blockDim.y + threadIdx.y; const uint32_t posx2 = posx << 2; const uint32_t posy2 = posy << 2; const uint32_t heightA = OC; const uint32_t widthA = IC * FH * FW; const uint32_t heightB = widthA; const uint32_t widthB = OH * OW; const uint32_t oh0 = (posx2 + 0) / OW * SH; const uint32_t ow0 = (posx2 + 0) % OW * SW; const uint32_t op0 = oh0 * IW + ow0; const uint32_t oh1 = (posx2 + 1) / OW * SH; const uint32_t ow1 = (posx2 + 1) % OW * SW; const uint32_t op1 = oh1 * IW + ow1; const uint32_t oh2 = (posx2 + 2) / OW * SH; const uint32_t ow2 = (posx2 + 2) % OW * SW; const uint32_t op2 = oh2 * IW + ow2; const uint32_t oh3 = (posx2 + 3) / OW * SH; const uint32_t ow3 = (posx2 + 3) % OW * SW; const uint32_t op3 = oh3 * IW + ow3; const uint32_t FP = FH * FW; // OC % (BLOCK*4) == 0 // IC*FH*FW % BLOCK == 0 // OH*OW % (BLOCK*4) == 0 __shared__ float4 localA[BY][BM]; __shared__ float4 localB[BM][BX]; uint32_t i = 0u; uint32_t offsetA = posy2 * widthA + tidx; uint32_t offsetB = n * INP_BS - PH * IW - PW; float4 sum0 = {0.0f, 0.0f, 0.0f, 0.0f}, sum1 = {0.0f, 0.0f, 0.0f, 0.0f}, sum2 = {0.0f, 0.0f, 0.0f, 0.0f}, sum3 = {0.0f, 0.0f, 0.0f, 0.0f}; uint32_t fh = tidy / FW % FH; uint32_t fw = tidy % FW; uint32_t ic = tidy / (FH * FW); uint32_t icm = tidy % (FH * FW); const uint32_t fhs = BM / FW % FH; const uint32_t fws = BM % FW; const uint32_t ics = BM / (FH * FW); const uint32_t icms = BM % (FH * FW); for (; i < widthA; i += BM, offsetA += BM) { // load localA if (tidx < BM) { localA[tidy][tidx].x = filter.get(offsetA + 0 * widthA); localA[tidy][tidx].y = filter.get(offsetA + 1 * widthA); localA[tidy][tidx].z = filter.get(offsetA + 2 * widthA); localA[tidy][tidx].w = filter.get(offsetA + 3 * widthA); } // load localB /* const uint32_t fh_t = (tidy+i) / FW % FH; const uint32_t fw_t = (tidy+i) % FW; const uint32_t ic_t = (tidy+i) / (FH*FW); if (fh != fh_t) printf("fh=%d, fh_t=%d\n", fh, fh_t); if (fw != fw_t) printf("fw=%d, fw_t=%d\n", fw, fw_t); if (ic != ic_t) printf("ic=%d, ic_t=%d\n", ic, ic_t); */ uint32_t fh2, fw2; if (is_xcorr) { fh2 = fh; fw2 = fw; } else { fh2 = FH - fh - 1; fw2 = FW - fw - 1; } if (tidy < BM) { uint32_t tmp = offsetB + (ic * IH + (fh2)) * IW + (fw2), ok = bool_as_mask(tidy + i < heightB), p0 = bool_as_mask( fh2 + oh0 >= PH && fh2 + oh0 < IH + PH && fw2 + ow0 >= PW && fw2 + ow0 < IW + PW), p1 = bool_as_mask( fh2 + oh1 >= PH && fh2 + oh1 < IH + PH && fw2 + ow1 >= PW && fw2 + ow1 < IW + PW), p2 = bool_as_mask( fh2 + oh2 >= PH && fh2 + oh2 < IH + PH && fw2 + ow2 >= PW && fw2 + ow2 < IW + PW), p3 = bool_as_mask( fh2 + oh3 >= PH && fh2 + oh3 < IH + PH && fw2 + ow3 >= PW && fw2 + ow3 < IW + PW); localB[tidy][tidx].x = visit_with_mask(src, tmp + op0, ok & p0); localB[tidy][tidx].y = visit_with_mask(src, tmp + op1, ok & p1); localB[tidy][tidx].z = visit_with_mask(src, tmp + op2, ok & p2); localB[tidy][tidx].w = visit_with_mask(src, tmp + op3, ok & p3); } __syncthreads(); for (uint32_t j = 0u; j < BM; ++j) { float4 tmpA = localA[tidy][j]; float4 tmpB = localB[j][tidx]; sum0.x += tmpA.x * tmpB.x; sum0.y += tmpA.x * tmpB.y; sum0.z += tmpA.x * tmpB.z; sum0.w += tmpA.x * tmpB.w; sum1.x += tmpA.y * tmpB.x; sum1.y += tmpA.y * tmpB.y; sum1.z += tmpA.y * tmpB.z; sum1.w += tmpA.y * tmpB.w; sum2.x += tmpA.z * tmpB.x; sum2.y += tmpA.z * tmpB.y; sum2.z += 
tmpA.z * tmpB.z; sum2.w += tmpA.z * tmpB.w; sum3.x += tmpA.w * tmpB.x; sum3.y += tmpA.w * tmpB.y; sum3.z += tmpA.w * tmpB.z; sum3.w += tmpA.w * tmpB.w; } fw += fws; fh += fhs; fh += (fw >= FW); fh -= (fh >= FH) * FH; fw -= (fw >= FW) * FW; ic += ics; icm += icms; ic += (icm >= FP); icm -= (icm >= FP) * FP; __syncthreads(); } const uint32_t dst_idx = n * OUT_BS + posy2 * widthB + posx2; bool y0 = (posy2 + 0 < heightA); bool y1 = (posy2 + 1 < heightA); bool y2 = (posy2 + 2 < heightA); bool y3 = (posy2 + 3 < heightA); bool x0 = (posx2 + 0 < widthB); bool x1 = (posx2 + 1 < widthB); bool x2 = (posx2 + 2 < widthB); bool x3 = (posx2 + 3 < widthB); if (y0) { if (x0) dst[dst_idx + 0 * widthB + 0] = sum0.x; if (x1) dst[dst_idx + 0 * widthB + 1] = sum0.y; if (x2) dst[dst_idx + 0 * widthB + 2] = sum0.z; if (x3) dst[dst_idx + 0 * widthB + 3] = sum0.w; } if (y1) { if (x0) dst[dst_idx + 1 * widthB + 0] = sum1.x; if (x1) dst[dst_idx + 1 * widthB + 1] = sum1.y; if (x2) dst[dst_idx + 1 * widthB + 2] = sum1.z; if (x3) dst[dst_idx + 1 * widthB + 3] = sum1.w; } if (y2) { if (x0) dst[dst_idx + 2 * widthB + 0] = sum2.x; if (x1) dst[dst_idx + 2 * widthB + 1] = sum2.y; if (x2) dst[dst_idx + 2 * widthB + 2] = sum2.z; if (x3) dst[dst_idx + 2 * widthB + 3] = sum2.w; } if (y3) { if (x0) dst[dst_idx + 3 * widthB + 0] = sum3.x; if (x1) dst[dst_idx + 3 * widthB + 1] = sum3.y; if (x2) dst[dst_idx + 3 * widthB + 2] = sum3.z; if (x3) dst[dst_idx + 3 * widthB + 3] = sum3.w; } } } // anonymous namespace void conv_bias::exec_inplace_matmul_fwd( const float* src, const float* filter, float* dst, size_t N, size_t INP_BS, size_t OUT_BS, size_t IC, size_t IH, size_t IW, size_t OC, size_t OH, size_t OW, size_t FH, size_t FW, size_t PH, size_t PW, size_t SH, size_t SW, bool is_xcorr, hipStream_t stream) { BufferFetcherTextureHost src_tex(const_cast<float*>(src), N * INP_BS), filter_tex(const_cast<float*>(filter), OC * IC * FH * FW); BufferFetcherRaw src_buf, filter_buf; src_buf.ptr = src; filter_buf.ptr = filter; if (!src_tex.init_succ || !filter_tex.init_succ) { src_tex.reset(); filter_tex.reset(); } int m = OC; int n = OH * OW; int BY = 1; int BX = 1; if (m <= 64) { while (BY < 16 && (BY << 2) < m) BY <<= 1; BX = 256 / BY; } else if (n <= 64) { while (BX < 16 && (BX << 2) < n) BX <<= 1; BY = 256 / BX; } else { BX = BY = 16; } dim3 blocks((OH * OW + BX * 4 - 1) / (BX * 4), (OC + BY * 4 - 1) / (BY * 4), N); dim3 threads(BX, BY); #define DISPATCH_BX_BY(BX, BY) \ do { \ if (src_tex.init_succ) { \ KernelPtr<BufferFetcherTexture>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherTexture>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherTexture>; \ } \ hipLaunchKernelGGL(( kptr), dim3(blocks), dim3(threads), 0, stream, \ src_tex.val, filter_tex.val, dst, INP_BS, OUT_BS, IC, IH, IW, OC, \ OH, OW, FH, FW, SH, SW, PH, PW); \ } else { \ KernelPtr<BufferFetcherRaw>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherRaw>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherRaw>; \ } \ hipLaunchKernelGGL(( kptr), dim3(blocks), dim3(threads), 0, stream, \ src_buf, filter_buf, dst, INP_BS, OUT_BS, IC, IH, IW, OC, OH, OW, \ FH, FW, SH, SW, PH, PW); \ } \ } while (0) #define DISPATCH_BX(BX) \ do { \ DISPATCH_BX_BY(BX, 256 / BX); \ } while (0) #define DISPATCH() \ do { \ switch (BX) { \ case 1: \ DISPATCH_BX(1); \ break; \ case 2: \ DISPATCH_BX(2); \ break; \ case 4: \ DISPATCH_BX(4); \ break; \ case 8: \ DISPATCH_BX(8); \ break; \ case 16: \ DISPATCH_BX(16); \ break; \ case 
32: \ DISPATCH_BX(32); \ break; \ case 64: \ DISPATCH_BX(64); \ break; \ case 128: \ DISPATCH_BX(128); \ break; \ case 256: \ DISPATCH_BX(256); \ break; \ default: \ report_error("no usable kernel"); \ } \ } while (0) DISPATCH(); #undef DISPATCH #undef DISPATCH_BX #undef DISPATCH_BX_BY after_kernel_launch(); } #include "src/cuda/kernel_common/diagnostic_epilogue.cuh" // vim: syntax=cpp.doxygen
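// The bool_as_mask / visit_with_mask pair above is what lets conv_kernel read padded
// (out-of-range) input taps without branching: the predicate becomes an all-ones or
// all-zeros word, the offset is ANDed with it so an out-of-range access collapses to a
// safe in-bounds one, and the fetched value is zeroed the same way. A self-contained
// illustration of the same bit trick (hypothetical helper name, not part of MegDNN):
__device__ __forceinline__ float guarded_load(const float* buf, unsigned int offset, bool in_bounds) {
    unsigned int mask = (!in_bounds) - 1u;   // in bounds -> 0xffffffff, out of bounds -> 0x00000000
    union { float f; unsigned int u; } v;
    v.f = buf[offset & mask];                // out-of-range offset collapses to element 0
    v.u &= mask;                             // and the loaded value is forced to 0.0f
    return v.f;
}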
dce9aa78132a38baccbaf9e4744aa60e10ac07fb.cu
#include "src/cuda/conv_bias/matmul/inplace_matmul_impl.cuh" #include "src/cuda/kernel_common/diagnostic_prologue.cuh" #include "src/cuda/utils.cuh" using namespace megdnn; using namespace cuda; namespace { struct BufferFetcherTexture { cudaTextureObject_t tex; __device__ __forceinline__ float get(uint32_t offset) { return tex1Dfetch<float>(tex, offset); } }; struct BufferFetcherRaw { const float* ptr; __device__ __forceinline__ float get(uint32_t offset) { return ptr[offset]; } }; struct BufferFetcherTextureHost { bool init_succ; BufferFetcherTexture val; BufferFetcherTextureHost(float* p, const size_t n); ~BufferFetcherTextureHost() { reset(); } void reset() { if (init_succ) { cuda_check(cudaDestroyTextureObject(val.tex)); init_succ = false; } } }; BufferFetcherTextureHost::BufferFetcherTextureHost(float* p, const size_t n) { init_succ = false; cudaTextureObject_t tex_obj; cudaResourceDesc res_desc; memset(&res_desc, 0, sizeof(cudaResourceDesc)); res_desc.resType = cudaResourceTypeLinear; res_desc.res.linear.devPtr = static_cast<void*>(p); res_desc.res.linear.sizeInBytes = n * sizeof(float); res_desc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaTextureDesc tex_desc; memset(&tex_desc, 0, sizeof(cudaTextureDesc)); if (cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL) == cudaSuccess) { val.tex = tex_obj; init_succ = true; } else { cudaGetLastError(); // reset error } } template <class BufferFetcher> struct KernelPtr { typedef void (*type)( BufferFetcher, BufferFetcher, float*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); }; //! 1 -> 0xffffffff, 0 -> 0x00000000 __device__ __forceinline__ uint32_t bool_as_mask(uint32_t cond) { return (!cond) - 1u; } union FloatAndU32 { float f; uint32_t u; }; //! \p mask must be either all 1 or 0 bits template <class BufferFetcher> __device__ __forceinline__ float visit_with_mask( BufferFetcher buf, uint32_t offset, uint32_t mask) { FloatAndU32 f; f.f = buf.get(offset & mask); f.u &= mask; return f.f; } template <uint32_t BY, uint32_t BX, bool is_xcorr, class BufferFetcher> __global__ void conv_kernel( BufferFetcher src, BufferFetcher filter, float* dst, const uint32_t INP_BS, const uint32_t OUT_BS, const uint32_t IC, const uint32_t IH, const uint32_t IW, const uint32_t OC, const uint32_t OH, const uint32_t OW, const uint32_t FH, const uint32_t FW, const uint32_t SH, const uint32_t SW, const uint32_t PH, const uint32_t PW) { const uint32_t BM = BY < BX ? 
BY : BX; // BY*BX == 256 // (OC) * (IC*FH*FW) * (OH*OW) const uint32_t n = blockIdx.z; const uint32_t tidx = threadIdx.x; const uint32_t tidy = threadIdx.y; const uint32_t posx = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t posy = blockIdx.y * blockDim.y + threadIdx.y; const uint32_t posx2 = posx << 2; const uint32_t posy2 = posy << 2; const uint32_t heightA = OC; const uint32_t widthA = IC * FH * FW; const uint32_t heightB = widthA; const uint32_t widthB = OH * OW; const uint32_t oh0 = (posx2 + 0) / OW * SH; const uint32_t ow0 = (posx2 + 0) % OW * SW; const uint32_t op0 = oh0 * IW + ow0; const uint32_t oh1 = (posx2 + 1) / OW * SH; const uint32_t ow1 = (posx2 + 1) % OW * SW; const uint32_t op1 = oh1 * IW + ow1; const uint32_t oh2 = (posx2 + 2) / OW * SH; const uint32_t ow2 = (posx2 + 2) % OW * SW; const uint32_t op2 = oh2 * IW + ow2; const uint32_t oh3 = (posx2 + 3) / OW * SH; const uint32_t ow3 = (posx2 + 3) % OW * SW; const uint32_t op3 = oh3 * IW + ow3; const uint32_t FP = FH * FW; // OC % (BLOCK*4) == 0 // IC*FH*FW % BLOCK == 0 // OH*OW % (BLOCK*4) == 0 __shared__ float4 localA[BY][BM]; __shared__ float4 localB[BM][BX]; uint32_t i = 0u; uint32_t offsetA = posy2 * widthA + tidx; uint32_t offsetB = n * INP_BS - PH * IW - PW; float4 sum0 = {0.0f, 0.0f, 0.0f, 0.0f}, sum1 = {0.0f, 0.0f, 0.0f, 0.0f}, sum2 = {0.0f, 0.0f, 0.0f, 0.0f}, sum3 = {0.0f, 0.0f, 0.0f, 0.0f}; uint32_t fh = tidy / FW % FH; uint32_t fw = tidy % FW; uint32_t ic = tidy / (FH * FW); uint32_t icm = tidy % (FH * FW); const uint32_t fhs = BM / FW % FH; const uint32_t fws = BM % FW; const uint32_t ics = BM / (FH * FW); const uint32_t icms = BM % (FH * FW); for (; i < widthA; i += BM, offsetA += BM) { // load localA if (tidx < BM) { localA[tidy][tidx].x = filter.get(offsetA + 0 * widthA); localA[tidy][tidx].y = filter.get(offsetA + 1 * widthA); localA[tidy][tidx].z = filter.get(offsetA + 2 * widthA); localA[tidy][tidx].w = filter.get(offsetA + 3 * widthA); } // load localB /* const uint32_t fh_t = (tidy+i) / FW % FH; const uint32_t fw_t = (tidy+i) % FW; const uint32_t ic_t = (tidy+i) / (FH*FW); if (fh != fh_t) printf("fh=%d, fh_t=%d\n", fh, fh_t); if (fw != fw_t) printf("fw=%d, fw_t=%d\n", fw, fw_t); if (ic != ic_t) printf("ic=%d, ic_t=%d\n", ic, ic_t); */ uint32_t fh2, fw2; if (is_xcorr) { fh2 = fh; fw2 = fw; } else { fh2 = FH - fh - 1; fw2 = FW - fw - 1; } if (tidy < BM) { uint32_t tmp = offsetB + (ic * IH + (fh2)) * IW + (fw2), ok = bool_as_mask(tidy + i < heightB), p0 = bool_as_mask( fh2 + oh0 >= PH && fh2 + oh0 < IH + PH && fw2 + ow0 >= PW && fw2 + ow0 < IW + PW), p1 = bool_as_mask( fh2 + oh1 >= PH && fh2 + oh1 < IH + PH && fw2 + ow1 >= PW && fw2 + ow1 < IW + PW), p2 = bool_as_mask( fh2 + oh2 >= PH && fh2 + oh2 < IH + PH && fw2 + ow2 >= PW && fw2 + ow2 < IW + PW), p3 = bool_as_mask( fh2 + oh3 >= PH && fh2 + oh3 < IH + PH && fw2 + ow3 >= PW && fw2 + ow3 < IW + PW); localB[tidy][tidx].x = visit_with_mask(src, tmp + op0, ok & p0); localB[tidy][tidx].y = visit_with_mask(src, tmp + op1, ok & p1); localB[tidy][tidx].z = visit_with_mask(src, tmp + op2, ok & p2); localB[tidy][tidx].w = visit_with_mask(src, tmp + op3, ok & p3); } __syncthreads(); for (uint32_t j = 0u; j < BM; ++j) { float4 tmpA = localA[tidy][j]; float4 tmpB = localB[j][tidx]; sum0.x += tmpA.x * tmpB.x; sum0.y += tmpA.x * tmpB.y; sum0.z += tmpA.x * tmpB.z; sum0.w += tmpA.x * tmpB.w; sum1.x += tmpA.y * tmpB.x; sum1.y += tmpA.y * tmpB.y; sum1.z += tmpA.y * tmpB.z; sum1.w += tmpA.y * tmpB.w; sum2.x += tmpA.z * tmpB.x; sum2.y += tmpA.z * tmpB.y; sum2.z += 
tmpA.z * tmpB.z; sum2.w += tmpA.z * tmpB.w; sum3.x += tmpA.w * tmpB.x; sum3.y += tmpA.w * tmpB.y; sum3.z += tmpA.w * tmpB.z; sum3.w += tmpA.w * tmpB.w; } fw += fws; fh += fhs; fh += (fw >= FW); fh -= (fh >= FH) * FH; fw -= (fw >= FW) * FW; ic += ics; icm += icms; ic += (icm >= FP); icm -= (icm >= FP) * FP; __syncthreads(); } const uint32_t dst_idx = n * OUT_BS + posy2 * widthB + posx2; bool y0 = (posy2 + 0 < heightA); bool y1 = (posy2 + 1 < heightA); bool y2 = (posy2 + 2 < heightA); bool y3 = (posy2 + 3 < heightA); bool x0 = (posx2 + 0 < widthB); bool x1 = (posx2 + 1 < widthB); bool x2 = (posx2 + 2 < widthB); bool x3 = (posx2 + 3 < widthB); if (y0) { if (x0) dst[dst_idx + 0 * widthB + 0] = sum0.x; if (x1) dst[dst_idx + 0 * widthB + 1] = sum0.y; if (x2) dst[dst_idx + 0 * widthB + 2] = sum0.z; if (x3) dst[dst_idx + 0 * widthB + 3] = sum0.w; } if (y1) { if (x0) dst[dst_idx + 1 * widthB + 0] = sum1.x; if (x1) dst[dst_idx + 1 * widthB + 1] = sum1.y; if (x2) dst[dst_idx + 1 * widthB + 2] = sum1.z; if (x3) dst[dst_idx + 1 * widthB + 3] = sum1.w; } if (y2) { if (x0) dst[dst_idx + 2 * widthB + 0] = sum2.x; if (x1) dst[dst_idx + 2 * widthB + 1] = sum2.y; if (x2) dst[dst_idx + 2 * widthB + 2] = sum2.z; if (x3) dst[dst_idx + 2 * widthB + 3] = sum2.w; } if (y3) { if (x0) dst[dst_idx + 3 * widthB + 0] = sum3.x; if (x1) dst[dst_idx + 3 * widthB + 1] = sum3.y; if (x2) dst[dst_idx + 3 * widthB + 2] = sum3.z; if (x3) dst[dst_idx + 3 * widthB + 3] = sum3.w; } } } // anonymous namespace void conv_bias::exec_inplace_matmul_fwd( const float* src, const float* filter, float* dst, size_t N, size_t INP_BS, size_t OUT_BS, size_t IC, size_t IH, size_t IW, size_t OC, size_t OH, size_t OW, size_t FH, size_t FW, size_t PH, size_t PW, size_t SH, size_t SW, bool is_xcorr, cudaStream_t stream) { BufferFetcherTextureHost src_tex(const_cast<float*>(src), N * INP_BS), filter_tex(const_cast<float*>(filter), OC * IC * FH * FW); BufferFetcherRaw src_buf, filter_buf; src_buf.ptr = src; filter_buf.ptr = filter; if (!src_tex.init_succ || !filter_tex.init_succ) { src_tex.reset(); filter_tex.reset(); } int m = OC; int n = OH * OW; int BY = 1; int BX = 1; if (m <= 64) { while (BY < 16 && (BY << 2) < m) BY <<= 1; BX = 256 / BY; } else if (n <= 64) { while (BX < 16 && (BX << 2) < n) BX <<= 1; BY = 256 / BX; } else { BX = BY = 16; } dim3 blocks((OH * OW + BX * 4 - 1) / (BX * 4), (OC + BY * 4 - 1) / (BY * 4), N); dim3 threads(BX, BY); #define DISPATCH_BX_BY(BX, BY) \ do { \ if (src_tex.init_succ) { \ KernelPtr<BufferFetcherTexture>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherTexture>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherTexture>; \ } \ kptr<<<blocks, threads, 0, stream>>>( \ src_tex.val, filter_tex.val, dst, INP_BS, OUT_BS, IC, IH, IW, OC, \ OH, OW, FH, FW, SH, SW, PH, PW); \ } else { \ KernelPtr<BufferFetcherRaw>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherRaw>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherRaw>; \ } \ kptr<<<blocks, threads, 0, stream>>>( \ src_buf, filter_buf, dst, INP_BS, OUT_BS, IC, IH, IW, OC, OH, OW, \ FH, FW, SH, SW, PH, PW); \ } \ } while (0) #define DISPATCH_BX(BX) \ do { \ DISPATCH_BX_BY(BX, 256 / BX); \ } while (0) #define DISPATCH() \ do { \ switch (BX) { \ case 1: \ DISPATCH_BX(1); \ break; \ case 2: \ DISPATCH_BX(2); \ break; \ case 4: \ DISPATCH_BX(4); \ break; \ case 8: \ DISPATCH_BX(8); \ break; \ case 16: \ DISPATCH_BX(16); \ break; \ case 32: \ DISPATCH_BX(32); \ break; \ case 64: \ 
DISPATCH_BX(64); \ break; \ case 128: \ DISPATCH_BX(128); \ break; \ case 256: \ DISPATCH_BX(256); \ break; \ default: \ report_error("no usable kernel"); \ } \ } while (0) DISPATCH(); #undef DISPATCH #undef DISPATCH_BX #undef DISPATCH_BX_BY after_kernel_launch(); } #include "src/cuda/kernel_common/diagnostic_epilogue.cuh" // vim: syntax=cpp.doxygen
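// For reference, the index arithmetic in conv_kernel above computes an implicit GEMM:
// dst(oc, oh*OW + ow) accumulates filter(oc, ic, fh, fw) * src(ic, oh*SH - PH + fh, ow*SW - PW + fw)
// over (ic, fh, fw), with out-of-range taps contributing zero. A plain single-sample CPU
// reference of that formulation for the cross-correlation case (hypothetical helper, not
// part of MegDNN) is:
void conv_xcorr_reference(const float* src, const float* filter, float* dst,
                          int IC, int IH, int IW, int OC, int OH, int OW,
                          int FH, int FW, int SH, int SW, int PH, int PW) {
    for (int oc = 0; oc < OC; ++oc)
        for (int oh = 0; oh < OH; ++oh)
            for (int ow = 0; ow < OW; ++ow) {
                float acc = 0.f;
                for (int ic = 0; ic < IC; ++ic)
                    for (int fh = 0; fh < FH; ++fh)
                        for (int fw = 0; fw < FW; ++fw) {
                            const int ih = oh * SH - PH + fh;
                            const int iw = ow * SW - PW + fw;
                            if (ih < 0 || ih >= IH || iw < 0 || iw >= IW)
                                continue;  // padded taps add zero (the GPU kernel masks instead)
                            acc += filter[((oc * IC + ic) * FH + fh) * FW + fw] *
                                   src[(ic * IH + ih) * IW + iw];
                        }
                dst[(oc * OH + oh) * OW + ow] = acc;
            }
}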
ff750de529ebc5a14af531032c3ffa508604fb62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * An implementation of COO SpMV using prefix scan to implement a * reduce-value-by-row strategy ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <iterator> #include <vector> #include <string> #include <algorithm> #include <stdio.h> #include <hipcub/hipcub.hpp> #include "coo_graph.cuh" #include "../test/test_util.h" using namespace cub; using namespace std; /****************************************************************************** * Globals, constants, and typedefs ******************************************************************************/ typedef int VertexId; // uint32s as vertex ids typedef double Value; // double-precision floating point values bool g_verbose = false; int g_timing_iterations = 1; CachingDeviceAllocator g_allocator; /****************************************************************************** * Texture referencing ******************************************************************************/ /** * Templated texture reference type for multiplicand vector */ template <typename Value> struct TexVector { // Texture type to actually use (e.g., because CUDA doesn't load doubles as texture items) typedef typename If<(Equals<Value, double>::VALUE), uint2, Value>::Type CastType; // Texture reference type typedef texture<CastType, hipTextureType1D, hipReadModeElementType> TexRef; static TexRef ref; /** * Bind textures */ static void BindTexture(void *d_in, int elements) { hipChannelFormatDesc tex_desc = hipCreateChannelDesc<CastType>(); if (d_in) { size_t offset; size_t bytes = 
sizeof(CastType) * elements; CubDebugExit(hipBindTexture(&offset, ref, d_in, tex_desc, bytes)); } } /** * Unbind textures */ static void UnbindTexture() { CubDebugExit(hipUnbindTexture(ref)); } /** * Load */ static __device__ __forceinline__ Value Load(int offset) { Value output; reinterpret_cast<typename TexVector<Value>::CastType &>(output) = tex1Dfetch(TexVector<Value>::ref, offset); return output; } }; // Texture reference definitions template <typename Value> typename TexVector<Value>::TexRef TexVector<Value>::ref = 0; /****************************************************************************** * Utility types ******************************************************************************/ /** * A partial dot-product sum paired with a corresponding row-id */ template <typename VertexId, typename Value> struct PartialProduct { VertexId row; /// Row-id Value partial; /// PartialProduct sum }; /** * A partial dot-product sum paired with a corresponding row-id (specialized for double-int pairings) */ template <> struct PartialProduct<int, double> { long long row; /// Row-id double partial; /// PartialProduct sum }; /** * Reduce-value-by-row scan operator */ struct ReduceByKeyOp { template <typename PartialProduct> __device__ __forceinline__ PartialProduct operator()( const PartialProduct &first, const PartialProduct &second) { PartialProduct retval; retval.partial = (second.row != first.row) ? second.partial : first.partial + second.partial; retval.row = second.row; return retval; } }; /** * Stateful block-wide prefix operator for BlockScan */ template <typename PartialProduct> struct BlockPrefixCallbackOp { // Running block-wide prefix PartialProduct running_prefix; /** * Returns the block-wide running_prefix in thread-0 */ __device__ __forceinline__ PartialProduct operator()( const PartialProduct &block_aggregate) ///< The aggregate sum of the BlockScan inputs { ReduceByKeyOp scan_op; PartialProduct retval = running_prefix; running_prefix = scan_op(running_prefix, block_aggregate); return retval; } }; /** * Operator for detecting discontinuities in a list of row identifiers. */ struct NewRowOp { /// Returns true if row_b is the start of a new row template <typename VertexId> __device__ __forceinline__ bool operator()( const VertexId& row_a, const VertexId& row_b) { return (row_a != row_b); } }; /****************************************************************************** * Persistent thread block types ******************************************************************************/ /** * SpMV threadblock abstraction for processing a contiguous segment of * sparse COO tiles. 
*/ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> struct PersistentBlockSpmv { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Constants enum { TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; // Head flag type typedef int HeadFlag; // Partial dot product type typedef PartialProduct<VertexId, Value> PartialProduct; // Parameterized BlockScan type for reduce-value-by-row scan typedef BlockScan<PartialProduct, BLOCK_THREADS, BLOCK_SCAN_RAKING_MEMOIZE> BlockScan; // Parameterized BlockExchange type for exchanging rows between warp-striped -> blocked arrangements typedef BlockExchange<VertexId, BLOCK_THREADS, ITEMS_PER_THREAD, true> BlockExchangeRows; // Parameterized BlockExchange type for exchanging values between warp-striped -> blocked arrangements typedef BlockExchange<Value, BLOCK_THREADS, ITEMS_PER_THREAD, true> BlockExchangeValues; // Parameterized BlockDiscontinuity type for setting head-flags for each new row segment typedef BlockDiscontinuity<HeadFlag, BLOCK_THREADS> BlockDiscontinuity; // Shared memory type for this threadblock struct TempStorage { union { typename BlockExchangeRows::TempStorage exchange_rows; // Smem needed for BlockExchangeRows typename BlockExchangeValues::TempStorage exchange_values; // Smem needed for BlockExchangeValues struct { typename BlockScan::TempStorage scan; // Smem needed for BlockScan typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for BlockDiscontinuity }; }; VertexId first_block_row; ///< The first row-ID seen by this thread block VertexId last_block_row; ///< The last row-ID seen by this thread block Value first_product; ///< The first dot-product written by this thread block }; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- TempStorage &temp_storage; BlockPrefixCallbackOp<PartialProduct> prefix_op; VertexId *d_rows; VertexId *d_columns; Value *d_values; Value *d_vector; Value *d_result; PartialProduct *d_block_partials; int block_offset; int block_end; //--------------------------------------------------------------------- // Operations //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ PersistentBlockSpmv( TempStorage &temp_storage, VertexId *d_rows, VertexId *d_columns, Value *d_values, Value *d_vector, Value *d_result, PartialProduct *d_block_partials, int block_offset, int block_end) : temp_storage(temp_storage), d_rows(d_rows), d_columns(d_columns), d_values(d_values), d_vector(d_vector), d_result(d_result), d_block_partials(d_block_partials), block_offset(block_offset), block_end(block_end) { // Initialize scalar shared memory values if (threadIdx.x == 0) { VertexId first_block_row = d_rows[block_offset]; VertexId last_block_row = d_rows[block_end - 1]; temp_storage.first_block_row = first_block_row; temp_storage.last_block_row = last_block_row; temp_storage.first_product = Value(0); // Initialize prefix_op to identity prefix_op.running_prefix.row = first_block_row; prefix_op.running_prefix.partial = Value(0); } __syncthreads(); } /** * Processes a COO input tile of edges, outputting dot products for each row */ template <bool FULL_TILE> __device__ __forceinline__ void ProcessTile( int block_offset, int guarded_items = 0) { VertexId columns[ITEMS_PER_THREAD]; VertexId 
rows[ITEMS_PER_THREAD]; Value values[ITEMS_PER_THREAD]; PartialProduct partial_sums[ITEMS_PER_THREAD]; HeadFlag head_flags[ITEMS_PER_THREAD]; // Load a threadblock-striped tile of A (sparse row-ids, column-ids, and values) if (FULL_TILE) { // Unguarded loads LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_columns + block_offset, columns); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_values + block_offset, values); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_rows + block_offset, rows); } else { // This is a partial-tile (e.g., the last tile of input). Extend the coordinates of the last // vertex for out-of-bound items, but zero-valued LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_columns + block_offset, columns, guarded_items, VertexId(0)); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_values + block_offset, values, guarded_items, Value(0)); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_rows + block_offset, rows, guarded_items, temp_storage.last_block_row); } // Load the referenced values from x and compute the dot product partials sums #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { #if CUB_PTX_ARCH >= 350 values[ITEM] *= ThreadLoad<LOAD_LDG>(d_vector + columns[ITEM]); #else values[ITEM] *= TexVector<Value>::Load(columns[ITEM]); #endif } // Transpose from warp-striped to blocked arrangement BlockExchangeValues(temp_storage.exchange_values).WarpStripedToBlocked(values); __syncthreads(); // Transpose from warp-striped to blocked arrangement BlockExchangeRows(temp_storage.exchange_rows).WarpStripedToBlocked(rows); // Barrier for smem reuse and coherence __syncthreads(); // FlagT row heads by looking for discontinuities BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( head_flags, // (Out) Head flags rows, // Original row ids NewRowOp(), // Functor for detecting start of new rows prefix_op.running_prefix.row); // Last row ID from previous tile to compare with first row ID in this tile // Assemble partial product structures #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { partial_sums[ITEM].partial = values[ITEM]; partial_sums[ITEM].row = rows[ITEM]; } // Reduce reduce-value-by-row across partial_sums using exclusive prefix scan PartialProduct block_aggregate; BlockScan(temp_storage.scan).ExclusiveScan( partial_sums, // Scan input partial_sums, // Scan output ReduceByKeyOp(), // Scan operator block_aggregate, // Block-wide total (unused) prefix_op); // Prefix operator for seeding the block-wide scan with the running total // Barrier for smem reuse and coherence __syncthreads(); // Scatter an accumulated dot product if it is the head of a valid row #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (head_flags[ITEM]) { d_result[partial_sums[ITEM].row] = partial_sums[ITEM].partial; // Save off the first partial product that this thread block will scatter if (partial_sums[ITEM].row == temp_storage.first_block_row) { temp_storage.first_product = partial_sums[ITEM].partial; } } } } /** * Iterate over input tiles belonging to this thread block */ __device__ __forceinline__ void ProcessTiles() { // Process full tiles while (block_offset <= block_end - TILE_ITEMS) { ProcessTile<true>(block_offset); block_offset += TILE_ITEMS; } // Process the last, partially-full tile (if present) int guarded_items = block_end - block_offset; if (guarded_items) { ProcessTile<false>(block_offset, guarded_items); } if (threadIdx.x == 0) { if (gridDim.x == 1) { // Scatter the final aggregate (this kernel contains only 1 
threadblock) d_result[prefix_op.running_prefix.row] = prefix_op.running_prefix.partial; } else { // Write the first and last partial products from this thread block so // that they can be subsequently "fixed up" in the next kernel. PartialProduct first_product; first_product.row = temp_storage.first_block_row; first_product.partial = temp_storage.first_product; d_block_partials[blockIdx.x * 2] = first_product; d_block_partials[(blockIdx.x * 2) + 1] = prefix_op.running_prefix; } } } }; /** * Threadblock abstraction for "fixing up" an array of interblock SpMV partial products. */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> struct FinalizeSpmvBlock { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Constants enum { TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; // Head flag type typedef int HeadFlag; // Partial dot product type typedef PartialProduct<VertexId, Value> PartialProduct; // Parameterized BlockScan type for reduce-value-by-row scan typedef BlockScan<PartialProduct, BLOCK_THREADS, BLOCK_SCAN_RAKING_MEMOIZE> BlockScan; // Parameterized BlockDiscontinuity type for setting head-flags for each new row segment typedef BlockDiscontinuity<HeadFlag, BLOCK_THREADS> BlockDiscontinuity; // Shared memory type for this threadblock struct TempStorage { typename BlockScan::TempStorage scan; // Smem needed for reduce-value-by-row scan typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for head-flagging VertexId last_block_row; }; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- TempStorage &temp_storage; BlockPrefixCallbackOp<PartialProduct> prefix_op; Value *d_result; PartialProduct *d_block_partials; int num_partials; //--------------------------------------------------------------------- // Operations //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ FinalizeSpmvBlock( TempStorage &temp_storage, Value *d_result, PartialProduct *d_block_partials, int num_partials) : temp_storage(temp_storage), d_result(d_result), d_block_partials(d_block_partials), num_partials(num_partials) { // Initialize scalar shared memory values if (threadIdx.x == 0) { VertexId first_block_row = d_block_partials[0].row; VertexId last_block_row = d_block_partials[num_partials - 1].row; temp_storage.last_block_row = last_block_row; // Initialize prefix_op to identity prefix_op.running_prefix.row = first_block_row; prefix_op.running_prefix.partial = Value(0); } __syncthreads(); } /** * Processes a COO input tile of edges, outputting dot products for each row */ template <bool FULL_TILE> __device__ __forceinline__ void ProcessTile( int block_offset, int guarded_items = 0) { VertexId rows[ITEMS_PER_THREAD]; PartialProduct partial_sums[ITEMS_PER_THREAD]; HeadFlag head_flags[ITEMS_PER_THREAD]; // Load a tile of block partials from previous kernel if (FULL_TILE) { // Full tile #if CUB_PTX_ARCH >= 350 LoadDirectBlocked<LOAD_LDG>(threadIdx.x, d_block_partials + block_offset, partial_sums); #else LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums); #endif } else { // Partial tile (extend zero-valued coordinates of the last partial-product for out-of-bounds items) PartialProduct default_sum; default_sum.row = temp_storage.last_block_row; 
default_sum.partial = Value(0); #if CUB_PTX_ARCH >= 350 LoadDirectBlocked<LOAD_LDG>(threadIdx.x, d_block_partials + block_offset, partial_sums, guarded_items, default_sum); #else LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums, guarded_items, default_sum); #endif } // Copy out row IDs for row-head flagging #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { rows[ITEM] = partial_sums[ITEM].row; } // FlagT row heads by looking for discontinuities BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( rows, // Original row ids head_flags, // (Out) Head flags NewRowOp(), // Functor for detecting start of new rows prefix_op.running_prefix.row); // Last row ID from previous tile to compare with first row ID in this tile // Reduce reduce-value-by-row across partial_sums using exclusive prefix scan PartialProduct block_aggregate; BlockScan(temp_storage.scan).ExclusiveScan( partial_sums, // Scan input partial_sums, // Scan output ReduceByKeyOp(), // Scan operator block_aggregate, // Block-wide total (unused) prefix_op); // Prefix operator for seeding the block-wide scan with the running total // Scatter an accumulated dot product if it is the head of a valid row #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (head_flags[ITEM]) { d_result[partial_sums[ITEM].row] = partial_sums[ITEM].partial; } } } /** * Iterate over input tiles belonging to this thread block */ __device__ __forceinline__ void ProcessTiles() { // Process full tiles int block_offset = 0; while (block_offset <= num_partials - TILE_ITEMS) { ProcessTile<true>(block_offset); block_offset += TILE_ITEMS; } // Process final partial tile (if present) int guarded_items = num_partials - block_offset; if (guarded_items) { ProcessTile<false>(block_offset, guarded_items); } // Scatter the final aggregate (this kernel contains only 1 threadblock) if (threadIdx.x == 0) { d_result[prefix_op.running_prefix.row] = prefix_op.running_prefix.partial; } } }; /****************************************************************************** * Kernel entrypoints ******************************************************************************/ /** * SpMV kernel whose thread blocks each process a contiguous segment of sparse COO tiles. */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> __launch_bounds__ (BLOCK_THREADS) __global__ void CooKernel( GridEvenShare<int> even_share, PartialProduct<VertexId, Value> *d_block_partials, VertexId *d_rows, VertexId *d_columns, Value *d_values, Value *d_vector, Value *d_result) { // Specialize SpMV threadblock abstraction type typedef PersistentBlockSpmv<BLOCK_THREADS, ITEMS_PER_THREAD, VertexId, Value> PersistentBlockSpmv; // Shared memory allocation __shared__ typename PersistentBlockSpmv::TempStorage temp_storage; // Initialize threadblock even-share to tell us where to start and stop our tile-processing even_share.BlockInit(); // Construct persistent thread block PersistentBlockSpmv persistent_block( temp_storage, d_rows, d_columns, d_values, d_vector, d_result, d_block_partials, even_share.block_offset, even_share.block_end); // Process input tiles persistent_block.ProcessTiles(); } /** * Kernel for "fixing up" an array of interblock SpMV partial products. 
*/ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> __launch_bounds__ (BLOCK_THREADS, 1) __global__ void CooFinalizeKernel( PartialProduct<VertexId, Value> *d_block_partials, int num_partials, Value *d_result) { // Specialize "fix-up" threadblock abstraction type typedef FinalizeSpmvBlock<BLOCK_THREADS, ITEMS_PER_THREAD, VertexId, Value> FinalizeSpmvBlock; // Shared memory allocation __shared__ typename FinalizeSpmvBlock::TempStorage temp_storage; // Construct persistent thread block FinalizeSpmvBlock persistent_block(temp_storage, d_result, d_block_partials, num_partials); // Process input tiles persistent_block.ProcessTiles(); } //--------------------------------------------------------------------- // Host subroutines //--------------------------------------------------------------------- /** * Simple test of device */ template < int COO_BLOCK_THREADS, int COO_ITEMS_PER_THREAD, int COO_SUBSCRIPTION_FACTOR, int FINALIZE_BLOCK_THREADS, int FINALIZE_ITEMS_PER_THREAD, typename VertexId, typename Value> void TestDevice( CooGraph<VertexId, Value>& coo_graph, Value* h_vector, Value* h_reference) { typedef PartialProduct<VertexId, Value> PartialProduct; const int COO_TILE_SIZE = COO_BLOCK_THREADS * COO_ITEMS_PER_THREAD; // SOA device storage VertexId *d_rows; // SOA graph row coordinates VertexId *d_columns; // SOA graph col coordinates Value *d_values; // SOA graph values Value *d_vector; // Vector multiplicand Value *d_result; // Output row PartialProduct *d_block_partials; // Temporary storage for communicating dot product partials between threadblocks // Create SOA version of coo_graph on host int num_edges = coo_graph.coo_tuples.size(); VertexId *h_rows = new VertexId[num_edges]; VertexId *h_columns = new VertexId[num_edges]; Value *h_values = new Value[num_edges]; for (int i = 0; i < num_edges; i++) { h_rows[i] = coo_graph.coo_tuples[i].row; h_columns[i] = coo_graph.coo_tuples[i].col; h_values[i] = coo_graph.coo_tuples[i].val; } // Get CUDA properties Device device_props; CubDebugExit(device_props.Init()); // Determine launch configuration from kernel properties int coo_sm_occupancy; CubDebugExit(device_props.MaxSmOccupancy( coo_sm_occupancy, CooKernel<COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, VertexId, Value>, COO_BLOCK_THREADS)); int max_coo_grid_size = device_props.sm_count * coo_sm_occupancy * COO_SUBSCRIPTION_FACTOR; // Construct an even-share work distribution GridEvenShare<int> even_share(num_edges, max_coo_grid_size, COO_TILE_SIZE); int coo_grid_size = even_share.grid_size; int num_partials = coo_grid_size * 2; // Allocate COO device arrays CubDebugExit(g_allocator.DeviceAllocate((void**)&d_rows, sizeof(VertexId) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_columns, sizeof(VertexId) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_vector, sizeof(Value) * coo_graph.col_dim)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_result, sizeof(Value) * coo_graph.row_dim)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_block_partials, sizeof(PartialProduct) * num_partials)); // Copy host arrays to device CubDebugExit(hipMemcpy(d_rows, h_rows, sizeof(VertexId) * num_edges, hipMemcpyHostToDevice)); CubDebugExit(hipMemcpy(d_columns, h_columns, sizeof(VertexId) * num_edges, hipMemcpyHostToDevice)); CubDebugExit(hipMemcpy(d_values, h_values, sizeof(Value) * num_edges, hipMemcpyHostToDevice)); CubDebugExit(hipMemcpy(d_vector, 
h_vector, sizeof(Value) * coo_graph.col_dim, hipMemcpyHostToDevice)); // Bind textures TexVector<Value>::BindTexture(d_vector, coo_graph.col_dim); // Print debug info printf("CooKernel<%d, %d><<<%d, %d>>>(...), Max SM occupancy: %d\n", COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, coo_grid_size, COO_BLOCK_THREADS, coo_sm_occupancy); if (coo_grid_size > 1) { printf("CooFinalizeKernel<<<1, %d>>>(...)\n", FINALIZE_BLOCK_THREADS); } fflush(stdout); CubDebugExit(hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte)); // Run kernel (always run one iteration without timing) GpuTimer gpu_timer; float elapsed_millis = 0.0; for (int i = 0; i <= g_timing_iterations; i++) { gpu_timer.Start(); // Initialize output CubDebugExit(hipMemset(d_result, 0, coo_graph.row_dim * sizeof(Value))); // Run the COO kernel hipLaunchKernelGGL(( CooKernel<COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD>), dim3(coo_grid_size), dim3(COO_BLOCK_THREADS), 0, 0, even_share, d_block_partials, d_rows, d_columns, d_values, d_vector, d_result); if (coo_grid_size > 1) { // Run the COO finalize kernel hipLaunchKernelGGL(( CooFinalizeKernel<FINALIZE_BLOCK_THREADS, FINALIZE_ITEMS_PER_THREAD>), dim3(1), dim3(FINALIZE_BLOCK_THREADS), 0, 0, d_block_partials, num_partials, d_result); } gpu_timer.Stop(); if (i > 0) elapsed_millis += gpu_timer.ElapsedMillis(); } // Force any kernel stdio to screen CubDebugExit(hipDeviceSynchronize()); fflush(stdout); // Display timing if (g_timing_iterations > 0) { float avg_elapsed = elapsed_millis / g_timing_iterations; int total_bytes = ((sizeof(VertexId) + sizeof(VertexId)) * 2 * num_edges) + (sizeof(Value) * coo_graph.row_dim); printf("%d iterations, average elapsed (%.3f ms), utilized bandwidth (%.3f GB/s), GFLOPS(%.3f)\n", g_timing_iterations, avg_elapsed, total_bytes / avg_elapsed / 1000.0 / 1000.0, num_edges * 2 / avg_elapsed / 1000.0 / 1000.0); } // Check results int compare = CompareDeviceResults(h_reference, d_result, coo_graph.row_dim, true, g_verbose); printf("%s\n", compare ? 
"FAIL" : "PASS"); AssertEquals(0, compare); // Cleanup TexVector<Value>::UnbindTexture(); CubDebugExit(g_allocator.DeviceFree(d_block_partials)); CubDebugExit(g_allocator.DeviceFree(d_rows)); CubDebugExit(g_allocator.DeviceFree(d_columns)); CubDebugExit(g_allocator.DeviceFree(d_values)); CubDebugExit(g_allocator.DeviceFree(d_vector)); CubDebugExit(g_allocator.DeviceFree(d_result)); delete[] h_rows; delete[] h_columns; delete[] h_values; } /** * Compute reference answer on CPU */ template <typename VertexId, typename Value> void ComputeReference( CooGraph<VertexId, Value>& coo_graph, Value* h_vector, Value* h_reference) { for (VertexId i = 0; i < coo_graph.row_dim; i++) { h_reference[i] = 0.0; } for (VertexId i = 0; i < coo_graph.coo_tuples.size(); i++) { h_reference[coo_graph.coo_tuples[i].row] += coo_graph.coo_tuples[i].val * h_vector[coo_graph.coo_tuples[i].col]; } } /** * Assign arbitrary values to vector items */ template <typename Value> void AssignVectorValues(Value *vector, int col_dim) { for (int i = 0; i < col_dim; i++) { vector[i] = 1.0; } } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("i", g_timing_iterations); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s\n [--device=<device-id>] [--v] [--iterations=<test iterations>] [--grid-size=<grid-size>]\n" "\t--type=wheel --spokes=<spokes>\n" "\t--type=grid2d --width=<width> [--no-self-loops]\n" "\t--type=grid3d --width=<width> [--no-self-loops]\n" "\t--type=market --file=<file>\n" "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Get graph type string type; args.GetCmdLineArgument("type", type); // Generate graph structure CpuTimer timer; timer.Start(); CooGraph<VertexId, Value> coo_graph; if (type == string("grid2d")) { VertexId width; args.GetCmdLineArgument("width", width); bool self_loops = !args.CheckCmdLineFlag("no-self-loops"); printf("Generating %s grid2d width(%d)... ", (self_loops) ? "5-pt" : "4-pt", width); fflush(stdout); if (coo_graph.InitGrid2d(width, self_loops)) exit(1); } else if (type == string("grid3d")) { VertexId width; args.GetCmdLineArgument("width", width); bool self_loops = !args.CheckCmdLineFlag("no-self-loops"); printf("Generating %s grid3d width(%d)... ", (self_loops) ? "7-pt" : "6-pt", width); fflush(stdout); if (coo_graph.InitGrid3d(width, self_loops)) exit(1); } else if (type == string("wheel")) { VertexId spokes; args.GetCmdLineArgument("spokes", spokes); printf("Generating wheel spokes(%d)... ", spokes); fflush(stdout); if (coo_graph.InitWheel(spokes)) exit(1); } else if (type == string("market")) { string filename; args.GetCmdLineArgument("file", filename); printf("Generating MARKET for %s... ", filename.c_str()); fflush(stdout); if (coo_graph.InitMarket(filename)) exit(1); } else { printf("Unsupported graph type\n"); exit(1); } timer.Stop(); printf("Done (%.3fs). 
%d non-zeros, %d rows, %d columns\n", timer.ElapsedMillis() / 1000.0, coo_graph.coo_tuples.size(), coo_graph.row_dim, coo_graph.col_dim); fflush(stdout); if (g_verbose) { cout << coo_graph << "\n"; } // Create vector Value *h_vector = new Value[coo_graph.col_dim]; AssignVectorValues(h_vector, coo_graph.col_dim); if (g_verbose) { printf("Vector[%d]: ", coo_graph.col_dim); DisplayResults(h_vector, coo_graph.col_dim); printf("\n\n"); } // Compute reference answer Value *h_reference = new Value[coo_graph.row_dim]; ComputeReference(coo_graph, h_vector, h_reference); if (g_verbose) { printf("Results[%d]: ", coo_graph.row_dim); DisplayResults(h_reference, coo_graph.row_dim); printf("\n\n"); } // Parameterization for SM35 enum { COO_BLOCK_THREADS = 64, COO_ITEMS_PER_THREAD = 10, COO_SUBSCRIPTION_FACTOR = 4, FINALIZE_BLOCK_THREADS = 256, FINALIZE_ITEMS_PER_THREAD = 4, }; // Run GPU version TestDevice< COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, COO_SUBSCRIPTION_FACTOR, FINALIZE_BLOCK_THREADS, FINALIZE_ITEMS_PER_THREAD>(coo_graph, h_vector, h_reference); // Cleanup delete[] h_vector; delete[] h_reference; return 0; }
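// The "reduce-value-by-row" strategy driving PersistentBlockSpmv above can be summarized
// sequentially: walk the row-sorted COO tuples keeping a running (row, partial) pair,
// combine exactly as ReduceByKeyOp does, and scatter the accumulated partial whenever the
// row id changes (the head flag). A serial model of that idea (hypothetical helper, for
// intuition only, not part of CUB); it assumes y was zero-initialized so empty rows stay 0:
void spmv_reduce_by_row(const int* rows, const int* cols, const double* vals,
                        int num_edges, const double* x, double* y) {
    if (num_edges == 0) return;
    int    running_row     = rows[0];
    double running_partial = 0.0;
    for (int e = 0; e < num_edges; ++e) {
        if (rows[e] != running_row) {          // discontinuity == head of a new row
            y[running_row]  = running_partial; // scatter the finished dot product
            running_row     = rows[e];
            running_partial = 0.0;
        }
        running_partial += vals[e] * x[cols[e]];
    }
    y[running_row] = running_partial;          // final row
}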
ff750de529ebc5a14af531032c3ffa508604fb62.cu
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * An implementation of COO SpMV using prefix scan to implement a * reduce-value-by-row strategy ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <iterator> #include <vector> #include <string> #include <algorithm> #include <stdio.h> #include <cub/cub.cuh> #include "coo_graph.cuh" #include "../test/test_util.h" using namespace cub; using namespace std; /****************************************************************************** * Globals, constants, and typedefs ******************************************************************************/ typedef int VertexId; // uint32s as vertex ids typedef double Value; // double-precision floating point values bool g_verbose = false; int g_timing_iterations = 1; CachingDeviceAllocator g_allocator; /****************************************************************************** * Texture referencing ******************************************************************************/ /** * Templated texture reference type for multiplicand vector */ template <typename Value> struct TexVector { // Texture type to actually use (e.g., because CUDA doesn't load doubles as texture items) typedef typename If<(Equals<Value, double>::VALUE), uint2, Value>::Type CastType; // Texture reference type typedef texture<CastType, cudaTextureType1D, cudaReadModeElementType> TexRef; static TexRef ref; /** * Bind textures */ static void BindTexture(void *d_in, int elements) { cudaChannelFormatDesc tex_desc = cudaCreateChannelDesc<CastType>(); if (d_in) { size_t offset; size_t bytes = sizeof(CastType) * elements; CubDebugExit(cudaBindTexture(&offset, ref, d_in, tex_desc, bytes)); } } 
/** * Unbind textures */ static void UnbindTexture() { CubDebugExit(cudaUnbindTexture(ref)); } /** * Load */ static __device__ __forceinline__ Value Load(int offset) { Value output; reinterpret_cast<typename TexVector<Value>::CastType &>(output) = tex1Dfetch(TexVector<Value>::ref, offset); return output; } }; // Texture reference definitions template <typename Value> typename TexVector<Value>::TexRef TexVector<Value>::ref = 0; /****************************************************************************** * Utility types ******************************************************************************/ /** * A partial dot-product sum paired with a corresponding row-id */ template <typename VertexId, typename Value> struct PartialProduct { VertexId row; /// Row-id Value partial; /// PartialProduct sum }; /** * A partial dot-product sum paired with a corresponding row-id (specialized for double-int pairings) */ template <> struct PartialProduct<int, double> { long long row; /// Row-id double partial; /// PartialProduct sum }; /** * Reduce-value-by-row scan operator */ struct ReduceByKeyOp { template <typename PartialProduct> __device__ __forceinline__ PartialProduct operator()( const PartialProduct &first, const PartialProduct &second) { PartialProduct retval; retval.partial = (second.row != first.row) ? second.partial : first.partial + second.partial; retval.row = second.row; return retval; } }; /** * Stateful block-wide prefix operator for BlockScan */ template <typename PartialProduct> struct BlockPrefixCallbackOp { // Running block-wide prefix PartialProduct running_prefix; /** * Returns the block-wide running_prefix in thread-0 */ __device__ __forceinline__ PartialProduct operator()( const PartialProduct &block_aggregate) ///< The aggregate sum of the BlockScan inputs { ReduceByKeyOp scan_op; PartialProduct retval = running_prefix; running_prefix = scan_op(running_prefix, block_aggregate); return retval; } }; /** * Operator for detecting discontinuities in a list of row identifiers. */ struct NewRowOp { /// Returns true if row_b is the start of a new row template <typename VertexId> __device__ __forceinline__ bool operator()( const VertexId& row_a, const VertexId& row_b) { return (row_a != row_b); } }; /****************************************************************************** * Persistent thread block types ******************************************************************************/ /** * SpMV threadblock abstraction for processing a contiguous segment of * sparse COO tiles. 
*/ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> struct PersistentBlockSpmv { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Constants enum { TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; // Head flag type typedef int HeadFlag; // Partial dot product type typedef PartialProduct<VertexId, Value> PartialProduct; // Parameterized BlockScan type for reduce-value-by-row scan typedef BlockScan<PartialProduct, BLOCK_THREADS, BLOCK_SCAN_RAKING_MEMOIZE> BlockScan; // Parameterized BlockExchange type for exchanging rows between warp-striped -> blocked arrangements typedef BlockExchange<VertexId, BLOCK_THREADS, ITEMS_PER_THREAD, true> BlockExchangeRows; // Parameterized BlockExchange type for exchanging values between warp-striped -> blocked arrangements typedef BlockExchange<Value, BLOCK_THREADS, ITEMS_PER_THREAD, true> BlockExchangeValues; // Parameterized BlockDiscontinuity type for setting head-flags for each new row segment typedef BlockDiscontinuity<HeadFlag, BLOCK_THREADS> BlockDiscontinuity; // Shared memory type for this threadblock struct TempStorage { union { typename BlockExchangeRows::TempStorage exchange_rows; // Smem needed for BlockExchangeRows typename BlockExchangeValues::TempStorage exchange_values; // Smem needed for BlockExchangeValues struct { typename BlockScan::TempStorage scan; // Smem needed for BlockScan typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for BlockDiscontinuity }; }; VertexId first_block_row; ///< The first row-ID seen by this thread block VertexId last_block_row; ///< The last row-ID seen by this thread block Value first_product; ///< The first dot-product written by this thread block }; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- TempStorage &temp_storage; BlockPrefixCallbackOp<PartialProduct> prefix_op; VertexId *d_rows; VertexId *d_columns; Value *d_values; Value *d_vector; Value *d_result; PartialProduct *d_block_partials; int block_offset; int block_end; //--------------------------------------------------------------------- // Operations //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ PersistentBlockSpmv( TempStorage &temp_storage, VertexId *d_rows, VertexId *d_columns, Value *d_values, Value *d_vector, Value *d_result, PartialProduct *d_block_partials, int block_offset, int block_end) : temp_storage(temp_storage), d_rows(d_rows), d_columns(d_columns), d_values(d_values), d_vector(d_vector), d_result(d_result), d_block_partials(d_block_partials), block_offset(block_offset), block_end(block_end) { // Initialize scalar shared memory values if (threadIdx.x == 0) { VertexId first_block_row = d_rows[block_offset]; VertexId last_block_row = d_rows[block_end - 1]; temp_storage.first_block_row = first_block_row; temp_storage.last_block_row = last_block_row; temp_storage.first_product = Value(0); // Initialize prefix_op to identity prefix_op.running_prefix.row = first_block_row; prefix_op.running_prefix.partial = Value(0); } __syncthreads(); } /** * Processes a COO input tile of edges, outputting dot products for each row */ template <bool FULL_TILE> __device__ __forceinline__ void ProcessTile( int block_offset, int guarded_items = 0) { VertexId columns[ITEMS_PER_THREAD]; VertexId 
rows[ITEMS_PER_THREAD]; Value values[ITEMS_PER_THREAD]; PartialProduct partial_sums[ITEMS_PER_THREAD]; HeadFlag head_flags[ITEMS_PER_THREAD]; // Load a threadblock-striped tile of A (sparse row-ids, column-ids, and values) if (FULL_TILE) { // Unguarded loads LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_columns + block_offset, columns); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_values + block_offset, values); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_rows + block_offset, rows); } else { // This is a partial-tile (e.g., the last tile of input). Extend the coordinates of the last // vertex for out-of-bound items, but zero-valued LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_columns + block_offset, columns, guarded_items, VertexId(0)); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_values + block_offset, values, guarded_items, Value(0)); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_rows + block_offset, rows, guarded_items, temp_storage.last_block_row); } // Load the referenced values from x and compute the dot product partials sums #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { #if CUB_PTX_ARCH >= 350 values[ITEM] *= ThreadLoad<LOAD_LDG>(d_vector + columns[ITEM]); #else values[ITEM] *= TexVector<Value>::Load(columns[ITEM]); #endif } // Transpose from warp-striped to blocked arrangement BlockExchangeValues(temp_storage.exchange_values).WarpStripedToBlocked(values); __syncthreads(); // Transpose from warp-striped to blocked arrangement BlockExchangeRows(temp_storage.exchange_rows).WarpStripedToBlocked(rows); // Barrier for smem reuse and coherence __syncthreads(); // FlagT row heads by looking for discontinuities BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( head_flags, // (Out) Head flags rows, // Original row ids NewRowOp(), // Functor for detecting start of new rows prefix_op.running_prefix.row); // Last row ID from previous tile to compare with first row ID in this tile // Assemble partial product structures #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { partial_sums[ITEM].partial = values[ITEM]; partial_sums[ITEM].row = rows[ITEM]; } // Reduce reduce-value-by-row across partial_sums using exclusive prefix scan PartialProduct block_aggregate; BlockScan(temp_storage.scan).ExclusiveScan( partial_sums, // Scan input partial_sums, // Scan output ReduceByKeyOp(), // Scan operator block_aggregate, // Block-wide total (unused) prefix_op); // Prefix operator for seeding the block-wide scan with the running total // Barrier for smem reuse and coherence __syncthreads(); // Scatter an accumulated dot product if it is the head of a valid row #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (head_flags[ITEM]) { d_result[partial_sums[ITEM].row] = partial_sums[ITEM].partial; // Save off the first partial product that this thread block will scatter if (partial_sums[ITEM].row == temp_storage.first_block_row) { temp_storage.first_product = partial_sums[ITEM].partial; } } } } /** * Iterate over input tiles belonging to this thread block */ __device__ __forceinline__ void ProcessTiles() { // Process full tiles while (block_offset <= block_end - TILE_ITEMS) { ProcessTile<true>(block_offset); block_offset += TILE_ITEMS; } // Process the last, partially-full tile (if present) int guarded_items = block_end - block_offset; if (guarded_items) { ProcessTile<false>(block_offset, guarded_items); } if (threadIdx.x == 0) { if (gridDim.x == 1) { // Scatter the final aggregate (this kernel contains only 1 
threadblock) d_result[prefix_op.running_prefix.row] = prefix_op.running_prefix.partial; } else { // Write the first and last partial products from this thread block so // that they can be subsequently "fixed up" in the next kernel. PartialProduct first_product; first_product.row = temp_storage.first_block_row; first_product.partial = temp_storage.first_product; d_block_partials[blockIdx.x * 2] = first_product; d_block_partials[(blockIdx.x * 2) + 1] = prefix_op.running_prefix; } } } }; /** * Threadblock abstraction for "fixing up" an array of interblock SpMV partial products. */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> struct FinalizeSpmvBlock { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Constants enum { TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; // Head flag type typedef int HeadFlag; // Partial dot product type typedef PartialProduct<VertexId, Value> PartialProduct; // Parameterized BlockScan type for reduce-value-by-row scan typedef BlockScan<PartialProduct, BLOCK_THREADS, BLOCK_SCAN_RAKING_MEMOIZE> BlockScan; // Parameterized BlockDiscontinuity type for setting head-flags for each new row segment typedef BlockDiscontinuity<HeadFlag, BLOCK_THREADS> BlockDiscontinuity; // Shared memory type for this threadblock struct TempStorage { typename BlockScan::TempStorage scan; // Smem needed for reduce-value-by-row scan typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for head-flagging VertexId last_block_row; }; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- TempStorage &temp_storage; BlockPrefixCallbackOp<PartialProduct> prefix_op; Value *d_result; PartialProduct *d_block_partials; int num_partials; //--------------------------------------------------------------------- // Operations //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ FinalizeSpmvBlock( TempStorage &temp_storage, Value *d_result, PartialProduct *d_block_partials, int num_partials) : temp_storage(temp_storage), d_result(d_result), d_block_partials(d_block_partials), num_partials(num_partials) { // Initialize scalar shared memory values if (threadIdx.x == 0) { VertexId first_block_row = d_block_partials[0].row; VertexId last_block_row = d_block_partials[num_partials - 1].row; temp_storage.last_block_row = last_block_row; // Initialize prefix_op to identity prefix_op.running_prefix.row = first_block_row; prefix_op.running_prefix.partial = Value(0); } __syncthreads(); } /** * Processes a COO input tile of edges, outputting dot products for each row */ template <bool FULL_TILE> __device__ __forceinline__ void ProcessTile( int block_offset, int guarded_items = 0) { VertexId rows[ITEMS_PER_THREAD]; PartialProduct partial_sums[ITEMS_PER_THREAD]; HeadFlag head_flags[ITEMS_PER_THREAD]; // Load a tile of block partials from previous kernel if (FULL_TILE) { // Full tile #if CUB_PTX_ARCH >= 350 LoadDirectBlocked<LOAD_LDG>(threadIdx.x, d_block_partials + block_offset, partial_sums); #else LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums); #endif } else { // Partial tile (extend zero-valued coordinates of the last partial-product for out-of-bounds items) PartialProduct default_sum; default_sum.row = temp_storage.last_block_row; 
default_sum.partial = Value(0); #if CUB_PTX_ARCH >= 350 LoadDirectBlocked<LOAD_LDG>(threadIdx.x, d_block_partials + block_offset, partial_sums, guarded_items, default_sum); #else LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums, guarded_items, default_sum); #endif } // Copy out row IDs for row-head flagging #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { rows[ITEM] = partial_sums[ITEM].row; } // FlagT row heads by looking for discontinuities BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( rows, // Original row ids head_flags, // (Out) Head flags NewRowOp(), // Functor for detecting start of new rows prefix_op.running_prefix.row); // Last row ID from previous tile to compare with first row ID in this tile // Reduce reduce-value-by-row across partial_sums using exclusive prefix scan PartialProduct block_aggregate; BlockScan(temp_storage.scan).ExclusiveScan( partial_sums, // Scan input partial_sums, // Scan output ReduceByKeyOp(), // Scan operator block_aggregate, // Block-wide total (unused) prefix_op); // Prefix operator for seeding the block-wide scan with the running total // Scatter an accumulated dot product if it is the head of a valid row #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (head_flags[ITEM]) { d_result[partial_sums[ITEM].row] = partial_sums[ITEM].partial; } } } /** * Iterate over input tiles belonging to this thread block */ __device__ __forceinline__ void ProcessTiles() { // Process full tiles int block_offset = 0; while (block_offset <= num_partials - TILE_ITEMS) { ProcessTile<true>(block_offset); block_offset += TILE_ITEMS; } // Process final partial tile (if present) int guarded_items = num_partials - block_offset; if (guarded_items) { ProcessTile<false>(block_offset, guarded_items); } // Scatter the final aggregate (this kernel contains only 1 threadblock) if (threadIdx.x == 0) { d_result[prefix_op.running_prefix.row] = prefix_op.running_prefix.partial; } } }; /****************************************************************************** * Kernel entrypoints ******************************************************************************/ /** * SpMV kernel whose thread blocks each process a contiguous segment of sparse COO tiles. */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> __launch_bounds__ (BLOCK_THREADS) __global__ void CooKernel( GridEvenShare<int> even_share, PartialProduct<VertexId, Value> *d_block_partials, VertexId *d_rows, VertexId *d_columns, Value *d_values, Value *d_vector, Value *d_result) { // Specialize SpMV threadblock abstraction type typedef PersistentBlockSpmv<BLOCK_THREADS, ITEMS_PER_THREAD, VertexId, Value> PersistentBlockSpmv; // Shared memory allocation __shared__ typename PersistentBlockSpmv::TempStorage temp_storage; // Initialize threadblock even-share to tell us where to start and stop our tile-processing even_share.BlockInit(); // Construct persistent thread block PersistentBlockSpmv persistent_block( temp_storage, d_rows, d_columns, d_values, d_vector, d_result, d_block_partials, even_share.block_offset, even_share.block_end); // Process input tiles persistent_block.ProcessTiles(); } /** * Kernel for "fixing up" an array of interblock SpMV partial products. 
*/ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> __launch_bounds__ (BLOCK_THREADS, 1) __global__ void CooFinalizeKernel( PartialProduct<VertexId, Value> *d_block_partials, int num_partials, Value *d_result) { // Specialize "fix-up" threadblock abstraction type typedef FinalizeSpmvBlock<BLOCK_THREADS, ITEMS_PER_THREAD, VertexId, Value> FinalizeSpmvBlock; // Shared memory allocation __shared__ typename FinalizeSpmvBlock::TempStorage temp_storage; // Construct persistent thread block FinalizeSpmvBlock persistent_block(temp_storage, d_result, d_block_partials, num_partials); // Process input tiles persistent_block.ProcessTiles(); } //--------------------------------------------------------------------- // Host subroutines //--------------------------------------------------------------------- /** * Simple test of device */ template < int COO_BLOCK_THREADS, int COO_ITEMS_PER_THREAD, int COO_SUBSCRIPTION_FACTOR, int FINALIZE_BLOCK_THREADS, int FINALIZE_ITEMS_PER_THREAD, typename VertexId, typename Value> void TestDevice( CooGraph<VertexId, Value>& coo_graph, Value* h_vector, Value* h_reference) { typedef PartialProduct<VertexId, Value> PartialProduct; const int COO_TILE_SIZE = COO_BLOCK_THREADS * COO_ITEMS_PER_THREAD; // SOA device storage VertexId *d_rows; // SOA graph row coordinates VertexId *d_columns; // SOA graph col coordinates Value *d_values; // SOA graph values Value *d_vector; // Vector multiplicand Value *d_result; // Output row PartialProduct *d_block_partials; // Temporary storage for communicating dot product partials between threadblocks // Create SOA version of coo_graph on host int num_edges = coo_graph.coo_tuples.size(); VertexId *h_rows = new VertexId[num_edges]; VertexId *h_columns = new VertexId[num_edges]; Value *h_values = new Value[num_edges]; for (int i = 0; i < num_edges; i++) { h_rows[i] = coo_graph.coo_tuples[i].row; h_columns[i] = coo_graph.coo_tuples[i].col; h_values[i] = coo_graph.coo_tuples[i].val; } // Get CUDA properties Device device_props; CubDebugExit(device_props.Init()); // Determine launch configuration from kernel properties int coo_sm_occupancy; CubDebugExit(device_props.MaxSmOccupancy( coo_sm_occupancy, CooKernel<COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, VertexId, Value>, COO_BLOCK_THREADS)); int max_coo_grid_size = device_props.sm_count * coo_sm_occupancy * COO_SUBSCRIPTION_FACTOR; // Construct an even-share work distribution GridEvenShare<int> even_share(num_edges, max_coo_grid_size, COO_TILE_SIZE); int coo_grid_size = even_share.grid_size; int num_partials = coo_grid_size * 2; // Allocate COO device arrays CubDebugExit(g_allocator.DeviceAllocate((void**)&d_rows, sizeof(VertexId) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_columns, sizeof(VertexId) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_vector, sizeof(Value) * coo_graph.col_dim)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_result, sizeof(Value) * coo_graph.row_dim)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_block_partials, sizeof(PartialProduct) * num_partials)); // Copy host arrays to device CubDebugExit(cudaMemcpy(d_rows, h_rows, sizeof(VertexId) * num_edges, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_columns, h_columns, sizeof(VertexId) * num_edges, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_values, h_values, sizeof(Value) * num_edges, cudaMemcpyHostToDevice)); 
CubDebugExit(cudaMemcpy(d_vector, h_vector, sizeof(Value) * coo_graph.col_dim, cudaMemcpyHostToDevice)); // Bind textures TexVector<Value>::BindTexture(d_vector, coo_graph.col_dim); // Print debug info printf("CooKernel<%d, %d><<<%d, %d>>>(...), Max SM occupancy: %d\n", COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, coo_grid_size, COO_BLOCK_THREADS, coo_sm_occupancy); if (coo_grid_size > 1) { printf("CooFinalizeKernel<<<1, %d>>>(...)\n", FINALIZE_BLOCK_THREADS); } fflush(stdout); CubDebugExit(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte)); // Run kernel (always run one iteration without timing) GpuTimer gpu_timer; float elapsed_millis = 0.0; for (int i = 0; i <= g_timing_iterations; i++) { gpu_timer.Start(); // Initialize output CubDebugExit(cudaMemset(d_result, 0, coo_graph.row_dim * sizeof(Value))); // Run the COO kernel CooKernel<COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD><<<coo_grid_size, COO_BLOCK_THREADS>>>( even_share, d_block_partials, d_rows, d_columns, d_values, d_vector, d_result); if (coo_grid_size > 1) { // Run the COO finalize kernel CooFinalizeKernel<FINALIZE_BLOCK_THREADS, FINALIZE_ITEMS_PER_THREAD><<<1, FINALIZE_BLOCK_THREADS>>>( d_block_partials, num_partials, d_result); } gpu_timer.Stop(); if (i > 0) elapsed_millis += gpu_timer.ElapsedMillis(); } // Force any kernel stdio to screen CubDebugExit(cudaThreadSynchronize()); fflush(stdout); // Display timing if (g_timing_iterations > 0) { float avg_elapsed = elapsed_millis / g_timing_iterations; int total_bytes = ((sizeof(VertexId) + sizeof(VertexId)) * 2 * num_edges) + (sizeof(Value) * coo_graph.row_dim); printf("%d iterations, average elapsed (%.3f ms), utilized bandwidth (%.3f GB/s), GFLOPS(%.3f)\n", g_timing_iterations, avg_elapsed, total_bytes / avg_elapsed / 1000.0 / 1000.0, num_edges * 2 / avg_elapsed / 1000.0 / 1000.0); } // Check results int compare = CompareDeviceResults(h_reference, d_result, coo_graph.row_dim, true, g_verbose); printf("%s\n", compare ? 
"FAIL" : "PASS"); AssertEquals(0, compare); // Cleanup TexVector<Value>::UnbindTexture(); CubDebugExit(g_allocator.DeviceFree(d_block_partials)); CubDebugExit(g_allocator.DeviceFree(d_rows)); CubDebugExit(g_allocator.DeviceFree(d_columns)); CubDebugExit(g_allocator.DeviceFree(d_values)); CubDebugExit(g_allocator.DeviceFree(d_vector)); CubDebugExit(g_allocator.DeviceFree(d_result)); delete[] h_rows; delete[] h_columns; delete[] h_values; } /** * Compute reference answer on CPU */ template <typename VertexId, typename Value> void ComputeReference( CooGraph<VertexId, Value>& coo_graph, Value* h_vector, Value* h_reference) { for (VertexId i = 0; i < coo_graph.row_dim; i++) { h_reference[i] = 0.0; } for (VertexId i = 0; i < coo_graph.coo_tuples.size(); i++) { h_reference[coo_graph.coo_tuples[i].row] += coo_graph.coo_tuples[i].val * h_vector[coo_graph.coo_tuples[i].col]; } } /** * Assign arbitrary values to vector items */ template <typename Value> void AssignVectorValues(Value *vector, int col_dim) { for (int i = 0; i < col_dim; i++) { vector[i] = 1.0; } } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("i", g_timing_iterations); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s\n [--device=<device-id>] [--v] [--iterations=<test iterations>] [--grid-size=<grid-size>]\n" "\t--type=wheel --spokes=<spokes>\n" "\t--type=grid2d --width=<width> [--no-self-loops]\n" "\t--type=grid3d --width=<width> [--no-self-loops]\n" "\t--type=market --file=<file>\n" "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Get graph type string type; args.GetCmdLineArgument("type", type); // Generate graph structure CpuTimer timer; timer.Start(); CooGraph<VertexId, Value> coo_graph; if (type == string("grid2d")) { VertexId width; args.GetCmdLineArgument("width", width); bool self_loops = !args.CheckCmdLineFlag("no-self-loops"); printf("Generating %s grid2d width(%d)... ", (self_loops) ? "5-pt" : "4-pt", width); fflush(stdout); if (coo_graph.InitGrid2d(width, self_loops)) exit(1); } else if (type == string("grid3d")) { VertexId width; args.GetCmdLineArgument("width", width); bool self_loops = !args.CheckCmdLineFlag("no-self-loops"); printf("Generating %s grid3d width(%d)... ", (self_loops) ? "7-pt" : "6-pt", width); fflush(stdout); if (coo_graph.InitGrid3d(width, self_loops)) exit(1); } else if (type == string("wheel")) { VertexId spokes; args.GetCmdLineArgument("spokes", spokes); printf("Generating wheel spokes(%d)... ", spokes); fflush(stdout); if (coo_graph.InitWheel(spokes)) exit(1); } else if (type == string("market")) { string filename; args.GetCmdLineArgument("file", filename); printf("Generating MARKET for %s... ", filename.c_str()); fflush(stdout); if (coo_graph.InitMarket(filename)) exit(1); } else { printf("Unsupported graph type\n"); exit(1); } timer.Stop(); printf("Done (%.3fs). 
%d non-zeros, %d rows, %d columns\n", timer.ElapsedMillis() / 1000.0, coo_graph.coo_tuples.size(), coo_graph.row_dim, coo_graph.col_dim); fflush(stdout); if (g_verbose) { cout << coo_graph << "\n"; } // Create vector Value *h_vector = new Value[coo_graph.col_dim]; AssignVectorValues(h_vector, coo_graph.col_dim); if (g_verbose) { printf("Vector[%d]: ", coo_graph.col_dim); DisplayResults(h_vector, coo_graph.col_dim); printf("\n\n"); } // Compute reference answer Value *h_reference = new Value[coo_graph.row_dim]; ComputeReference(coo_graph, h_vector, h_reference); if (g_verbose) { printf("Results[%d]: ", coo_graph.row_dim); DisplayResults(h_reference, coo_graph.row_dim); printf("\n\n"); } // Parameterization for SM35 enum { COO_BLOCK_THREADS = 64, COO_ITEMS_PER_THREAD = 10, COO_SUBSCRIPTION_FACTOR = 4, FINALIZE_BLOCK_THREADS = 256, FINALIZE_ITEMS_PER_THREAD = 4, }; // Run GPU version TestDevice< COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, COO_SUBSCRIPTION_FACTOR, FINALIZE_BLOCK_THREADS, FINALIZE_ITEMS_PER_THREAD>(coo_graph, h_vector, h_reference); // Cleanup delete[] h_vector; delete[] h_reference; return 0; }
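A note on the reduce-value-by-row strategy implemented above: each partial dot-product carries its row id through the prefix scan, and ReduceByKeyOp only accumulates a partial into its successor when both belong to the same row; head flags then mark where a finished row total can be scattered to d_result. A minimal host-side illustration of that operator semantics (standalone C++ written for this note, not part of CUB; the tiny COO sample is invented):

#include <cstdio>
#include <vector>

// Same shape as the PartialProduct / ReduceByKeyOp pair used by the kernels above.
struct Partial { int row; double val; };

Partial reduce_by_key(const Partial &first, const Partial &second)
{
    Partial out;
    out.val = (second.row != first.row) ? second.val : first.val + second.val;
    out.row = second.row;
    return out;
}

int main()
{
    // A tiny COO matrix-vector product: (row, a_ij * x_j) pairs in row-major order.
    std::vector<Partial> partials = {{0, 1.0}, {0, 2.0}, {1, 5.0}, {2, 0.5}, {2, 0.5}};
    std::vector<double> result(3, 0.0);

    // Sequential scan with the operator; scatter a total whenever the row changes.
    Partial running = partials[0];
    for (size_t i = 1; i < partials.size(); ++i) {
        if (partials[i].row != running.row) result[running.row] = running.val;
        running = reduce_by_key(running, partials[i]);
    }
    result[running.row] = running.val;   // last row

    for (size_t r = 0; r < result.size(); ++r)
        printf("row %zu -> %f\n", r, result[r]);   // prints 3, 5, 1
    return 0;
}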
95f474e38481ec5b9305a08ddaab598a945ec15e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>

int i, j, k;

// Fills the matrix with random values
float** rand_matrix(float **mat, int kuz)
{
    float **backup = mat;
    for(i = 0; i < kuz; i++)
        for(j = 0; j < kuz; j++)
            mat[i][j] = (rand()%1000+1);
    return backup;
}

// Prints the matrix
void print_matrix(float **mat, int kuz)
{
    for(i = 0; i < kuz; i++)
    {
        for(j = 0; j < kuz; j++)
            printf(" %f |", mat[i][j]);
        printf("\n");
    }
}

int main(int argc, char *argv[])
{
    if (argc != 3)
    {
        fprintf(stderr, "Usage: %s <N for an N*N matrix> <0 to print the matrices, 1 to skip printing>\n", argv[0]);
        exit(-1);
    }

    hipEvent_t inicio, fin, st2, fn2;
    float tiempo, tt2;
    float **mat, **mat2, **res;
    int N = atoi(argv[1]);

    // Start the overall timer
    hipEventCreate( &st2 );
    hipEventCreate( &fn2 );
    hipEventRecord( st2, 0 );

    // Allocate the row-pointer arrays
    mat  = (float **)malloc(N*sizeof(float*));
    mat2 = (float **)malloc(N*sizeof(float*));
    res  = (float **)malloc(N*sizeof(float*));
    for(i = 0; i < N; i++)
    {
        // Allocate one row per matrix; calloc zeroes the result row so the
        // += accumulation below starts from zero
        mat[i]  = (float *)malloc(N*sizeof(float));
        mat2[i] = (float *)malloc(N*sizeof(float));
        res[i]  = (float *)calloc(N, sizeof(float));
    }

    // Fill the input matrices
    mat  = rand_matrix(mat, N);
    mat2 = rand_matrix(mat2, N);

    // Print the input matrices
    if(atoi(argv[2]) == 0)
    {
        print_matrix(mat, N);
        printf("\n\n\n");
        print_matrix(mat2, N);
    }

    // Start the computation timer
    hipEventCreate( &inicio );
    hipEventCreate( &fin );
    hipEventRecord( inicio, 0 );

    // Do the multiplication; j and k are globals, so they must be made
    // private or every OpenMP thread would share the same loop counters
    #pragma omp parallel for private(j,k)
    for(i=0; i<N; ++i)
        for(j=0; j<N; ++j)
            for(k=0; k<N; ++k)
                res[i][j] += mat[i][k]*mat2[k][j];

    // Stop the computation timer
    hipEventRecord( fin, 0 );
    hipEventSynchronize( fin );
    hipEventElapsedTime( &tiempo, inicio, fin );

    // Print the result
    if(atoi(argv[2]) == 0)
    {
        printf("\n\n\n");
        print_matrix(res, N);
    }

    // Free memory: each row first, then the row-pointer arrays
    for(i = 0; i < N; i++)
    {
        free(mat[i]);
        free(mat2[i]);
        free(res[i]);
    }
    free(mat);
    free(mat2);
    free(res);

    // Stop the overall timer and report both measurements
    hipEventRecord( fn2, 0 );
    hipEventSynchronize( fn2 );
    hipEventElapsedTime( &tt2, st2, fn2 );

    printf("computation time in ms: %f\t total time %f\n", tiempo, tt2);
}
95f474e38481ec5b9305a08ddaab598a945ec15e.cu
#include <stdlib.h>
#include <stdio.h>

int i, j, k;

// Fills the matrix with random values
float** rand_matrix(float **mat, int kuz)
{
    float **backup = mat;
    for(i = 0; i < kuz; i++)
        for(j = 0; j < kuz; j++)
            mat[i][j] = (rand()%1000+1);
    return backup;
}

// Prints the matrix
void print_matrix(float **mat, int kuz)
{
    for(i = 0; i < kuz; i++)
    {
        for(j = 0; j < kuz; j++)
            printf(" %f |", mat[i][j]);
        printf("\n");
    }
}

int main(int argc, char *argv[])
{
    if (argc != 3)
    {
        fprintf(stderr, "Usage: %s <N for an N*N matrix> <0 to print the matrices, 1 to skip printing>\n", argv[0]);
        exit(-1);
    }

    cudaEvent_t inicio, fin, st2, fn2;
    float tiempo, tt2;
    float **mat, **mat2, **res;
    int N = atoi(argv[1]);

    // Start the overall timer
    cudaEventCreate( &st2 );
    cudaEventCreate( &fn2 );
    cudaEventRecord( st2, 0 );

    // Allocate the row-pointer arrays
    mat  = (float **)malloc(N*sizeof(float*));
    mat2 = (float **)malloc(N*sizeof(float*));
    res  = (float **)malloc(N*sizeof(float*));
    for(i = 0; i < N; i++)
    {
        // Allocate one row per matrix; calloc zeroes the result row so the
        // += accumulation below starts from zero
        mat[i]  = (float *)malloc(N*sizeof(float));
        mat2[i] = (float *)malloc(N*sizeof(float));
        res[i]  = (float *)calloc(N, sizeof(float));
    }

    // Fill the input matrices
    mat  = rand_matrix(mat, N);
    mat2 = rand_matrix(mat2, N);

    // Print the input matrices
    if(atoi(argv[2]) == 0)
    {
        print_matrix(mat, N);
        printf("\n\n\n");
        print_matrix(mat2, N);
    }

    // Start the computation timer
    cudaEventCreate( &inicio );
    cudaEventCreate( &fin );
    cudaEventRecord( inicio, 0 );

    // Do the multiplication; j and k are globals, so they must be made
    // private or every OpenMP thread would share the same loop counters
    #pragma omp parallel for private(j,k)
    for(i=0; i<N; ++i)
        for(j=0; j<N; ++j)
            for(k=0; k<N; ++k)
                res[i][j] += mat[i][k]*mat2[k][j];

    // Stop the computation timer
    cudaEventRecord( fin, 0 );
    cudaEventSynchronize( fin );
    cudaEventElapsedTime( &tiempo, inicio, fin );

    // Print the result
    if(atoi(argv[2]) == 0)
    {
        printf("\n\n\n");
        print_matrix(res, N);
    }

    // Free memory: each row first, then the row-pointer arrays
    for(i = 0; i < N; i++)
    {
        free(mat[i]);
        free(mat2[i]);
        free(res[i]);
    }
    free(mat);
    free(mat2);
    free(res);

    // Stop the overall timer and report both measurements
    cudaEventRecord( fn2, 0 );
    cudaEventSynchronize( fn2 );
    cudaEventElapsedTime( &tt2, st2, fn2 );

    printf("computation time in ms: %f\t total time %f\n", tiempo, tt2);
}
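The pair of files above never launches a device kernel: despite the .hip/.cu extensions, the N*N multiplication runs on the host under OpenMP, and the event pair only brackets that host loop. For contrast, a minimal sketch of the same multiplication written as a CUDA kernel over flat row-major arrays (hypothetical names, not taken from the files):

// One thread per output element C[row][col]; matrices stored row-major in flat arrays.
__global__ void matmul_naive(const float *A, const float *B, float *C, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N || col >= N) return;

    float acc = 0.0f;
    for (int k = 0; k < N; ++k)
        acc += A[row * N + k] * B[k * N + col];
    C[row * N + col] = acc;
}

// Launch sketch: 16x16 thread blocks covering the N x N output.
// dim3 block(16, 16);
// dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
// matmul_naive<<<grid, block>>>(d_A, d_B, d_C, N);

A 16x16 block shape is only a common starting point; shared-memory tiling would be the usual next step for performance.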
90eea85e1f2d5059d4b6c542fe3af415d80fecd6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/hip/LaunchUtils.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { #define MAX_THREADS 512 // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest2d_out_frame( const scalar_t* idata, scalar_t* odata, const size_t nc, const size_t height1, const size_t width1, const size_t height2, const size_t width2, float height_scale, float width_scale) { size_t nc_iter = threadIdx.z + blockIdx.z * blockDim.z; int w2 = threadIdx.x + blockIdx.x * blockDim.x; int h2 = threadIdx.y + blockIdx.y * blockDim.y; if (w2 >= width2 || h2 >= height2) { return; } int nc_stride = blockDim.z * gridDim.z; const size_t h1 = height1 == height2 ? h2 : nearest_neighbor_compute_source_index(height_scale, h2, height1); const size_t w1 = width1 == width2 ? w2 : nearest_neighbor_compute_source_index(width_scale, w2, width1); size_t src_index = (nc_iter * height1 + h1) * width1 + w1; size_t src_index_stride = nc_stride * width1 * height1; size_t dst_index = (nc_iter * height2 + h2) * width2 + w2; size_t dst_index_stride = nc_stride * width2 * height2; // iterating over while (nc_iter < nc) { odata[dst_index] = idata[src_index]; dst_index += dst_index_stride; src_index += src_index_stride; nc_iter += nc_stride; } } // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest2d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_h, size_t src_dim_w, size_t dst_dim_h, size_t dst_dim_w, scalar_t* grad_i, float height_scale, float width_scale) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_h * dst_dim_w) return; int dst_c_stride = dst_dim_h * dst_dim_w; int src_c_stride = src_dim_h * src_dim_w; int c = (dst_idx / (dst_c_stride)) % dim_c; int dst_y = (dst_idx / dst_dim_w) % dst_dim_h; int src_y = nearest_neighbor_bw_compute_source_index(height_scale, dst_y, src_dim_h); int src_y_up = nearest_neighbor_bw_compute_source_index( height_scale, dst_y + 1, src_dim_h + 1); int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_bw_compute_source_index(width_scale, dst_x, src_dim_w); int src_x_up = nearest_neighbor_bw_compute_source_index( width_scale, dst_x + 1, src_dim_w + 1); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; for (int y = src_y; y < src_y_up; y++) { for (int x = src_x; x < src_x_up; x++) { int src_idx = b * dim_c * src_c_stride + c * src_c_stride + y * src_dim_w + x; grad += grad_o[src_idx]; } } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_c_stride; } } static void upsample_nearest2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU( "upsample_nearest2d_out_cuda_template", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); int output_height = output_size[0]; int output_width = 
output_size[1]; int nbatch = input_.size(0); int channels = input_.size(1); int input_height = input_.size(2); int input_width = input_.size(3); upsample_2d_shape_check( input_, Tensor(), nbatch, channels, input_height, input_width, output_height, output_width); AT_ASSERT( input_height > 0 && input_width > 0 && output_height > 0 && output_width > 0); Tensor input = input_.contiguous(); output.resize_({nbatch, channels, output_height, output_width}); if (input.numel() == 0) { return; } int nc = nbatch * channels; const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize; // upsample_2d_shape_check makes sure input/output tensor is not empty; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(output_width), max_threads)); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(output_height), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(nc, max_threads / block_x / block_y)); const dim3 block(block_x, block_y, block_z); int grid_x = cuda::ATenCeilDiv(output_width, block_x); int grid_y = cuda::ATenCeilDiv(output_height, block_y); int grid_z = std::min<int>( maxGridSize[2], cuda::ATenCeilDiv(nc, block_z * 4)); const dim3 grid(grid_x, grid_y, grid_z); // Error out on cases where grid_x & grid_y exceeds limit of launch config, as // the current kernel implementation doesn't loop over the two dimensions. // This is unlikely to happen. // TODO: kernel implementation could stride on spatial dimension. We probably // need to overhaul the kernel. TORCH_CHECK( grid_x <= maxGridSize[0] && grid_y <= maxGridSize[1], "input tensor has spatial dimension larger than the kernel capacity"); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, input.scalar_type(), "upsample_nearest2d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output.data_ptr<scalar_t>(); const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height); const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width); hipLaunchKernelGGL(( upsample_nearest2d_out_frame<scalar_t, accscalar_t>) , dim3(grid), dim3(block), 0, stream, idata, odata, nc, input_height, input_width, output_height, output_width, height_scale, width_scale); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); }); } static void upsample_nearest2d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest2d_backward_out_cuda", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 4, "It is expected input_size equals to 4, but got size ", input_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_size[0]; int channels = input_size[1]; int input_height = input_size[2]; int input_width = input_size[3]; upsample_2d_shape_check( Tensor(), grad_output_, nbatch, 
channels, input_height, input_width, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_height, input_width}); if (grad_input.numel() == 0) { return; } // upsample_2d_shape_check makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest2d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); const float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height); const float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width); hipLaunchKernelGGL(( upsample_nearest2d_backward_out_frame<scalar_t, accscalar_t>) , dim3(gdim), dim3(bdim), 0, stream, odata, nbatch, channels, output_height, output_width, input_height, input_width, idata, height_scale, width_scale); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace Tensor& upsample_nearest2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { upsample_nearest2d_out_cuda_template(output, input, output_size, scales_h, scales_w); return output; } Tensor upsample_nearest2d_cuda(const Tensor& input, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_nearest2d_out_cuda_template(output, input, output_size, scales_h, scales_w); return output; } Tensor& upsample_nearest2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_h, scales_w); return grad_input; } Tensor upsample_nearest2d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_h, scales_w); return grad_input; } using at::native::upsample::compute_output_size; using at::native::upsample_cuda::get_scale_value; Tensor upsample_nearest2d_cuda( const Tensor& input, c10::optional<IntArrayRef> output_size, c10::optional<ArrayRef<double>> scale_factors) { auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); upsample_nearest2d_out_cuda_template(output, input, osize, scale_h, scale_w); return output; } Tensor upsample_nearest2d_backward_cuda( const Tensor& grad_output, c10::optional<IntArrayRef> output_size, IntArrayRef input_size, 
c10::optional<ArrayRef<double>> scale_factors) { auto osize = compute_output_size(input_size, output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, osize, input_size, scale_h, scale_w); return grad_input; } } // namespace native } // namespace at
90eea85e1f2d5059d4b6c542fe3af415d80fecd6.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/cuda/LaunchUtils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { #define MAX_THREADS 512 // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest2d_out_frame( const scalar_t* idata, scalar_t* odata, const size_t nc, const size_t height1, const size_t width1, const size_t height2, const size_t width2, float height_scale, float width_scale) { size_t nc_iter = threadIdx.z + blockIdx.z * blockDim.z; int w2 = threadIdx.x + blockIdx.x * blockDim.x; int h2 = threadIdx.y + blockIdx.y * blockDim.y; if (w2 >= width2 || h2 >= height2) { return; } int nc_stride = blockDim.z * gridDim.z; const size_t h1 = height1 == height2 ? h2 : nearest_neighbor_compute_source_index(height_scale, h2, height1); const size_t w1 = width1 == width2 ? w2 : nearest_neighbor_compute_source_index(width_scale, w2, width1); size_t src_index = (nc_iter * height1 + h1) * width1 + w1; size_t src_index_stride = nc_stride * width1 * height1; size_t dst_index = (nc_iter * height2 + h2) * width2 + w2; size_t dst_index_stride = nc_stride * width2 * height2; // iterating over while (nc_iter < nc) { odata[dst_index] = idata[src_index]; dst_index += dst_index_stride; src_index += src_index_stride; nc_iter += nc_stride; } } // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest2d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_h, size_t src_dim_w, size_t dst_dim_h, size_t dst_dim_w, scalar_t* grad_i, float height_scale, float width_scale) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_h * dst_dim_w) return; int dst_c_stride = dst_dim_h * dst_dim_w; int src_c_stride = src_dim_h * src_dim_w; int c = (dst_idx / (dst_c_stride)) % dim_c; int dst_y = (dst_idx / dst_dim_w) % dst_dim_h; int src_y = nearest_neighbor_bw_compute_source_index(height_scale, dst_y, src_dim_h); int src_y_up = nearest_neighbor_bw_compute_source_index( height_scale, dst_y + 1, src_dim_h + 1); int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_bw_compute_source_index(width_scale, dst_x, src_dim_w); int src_x_up = nearest_neighbor_bw_compute_source_index( width_scale, dst_x + 1, src_dim_w + 1); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; for (int y = src_y; y < src_y_up; y++) { for (int x = src_x; x < src_x_up; x++) { int src_idx = b * dim_c * src_c_stride + c * src_c_stride + y * src_dim_w + x; grad += grad_o[src_idx]; } } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_c_stride; } } static void upsample_nearest2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU( "upsample_nearest2d_out_cuda_template", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_.size(0); int channels = input_.size(1); int 
input_height = input_.size(2); int input_width = input_.size(3); upsample_2d_shape_check( input_, Tensor(), nbatch, channels, input_height, input_width, output_height, output_width); AT_ASSERT( input_height > 0 && input_width > 0 && output_height > 0 && output_width > 0); Tensor input = input_.contiguous(); output.resize_({nbatch, channels, output_height, output_width}); if (input.numel() == 0) { return; } int nc = nbatch * channels; const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize; // upsample_2d_shape_check makes sure input/output tensor is not empty; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(output_width), max_threads)); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(output_height), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(nc, max_threads / block_x / block_y)); const dim3 block(block_x, block_y, block_z); int grid_x = cuda::ATenCeilDiv(output_width, block_x); int grid_y = cuda::ATenCeilDiv(output_height, block_y); int grid_z = std::min<int>( maxGridSize[2], cuda::ATenCeilDiv(nc, block_z * 4)); const dim3 grid(grid_x, grid_y, grid_z); // Error out on cases where grid_x & grid_y exceeds limit of launch config, as // the current kernel implementation doesn't loop over the two dimensions. // This is unlikely to happen. // TODO: kernel implementation could stride on spatial dimension. We probably // need to overhaul the kernel. TORCH_CHECK( grid_x <= maxGridSize[0] && grid_y <= maxGridSize[1], "input tensor has spatial dimension larger than the kernel capacity"); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, input.scalar_type(), "upsample_nearest2d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output.data_ptr<scalar_t>(); const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height); const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width); upsample_nearest2d_out_frame<scalar_t, accscalar_t> <<<grid, block, 0, stream>>>( idata, odata, nc, input_height, input_width, output_height, output_width, height_scale, width_scale); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); }); } static void upsample_nearest2d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest2d_backward_out_cuda", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 4, "It is expected input_size equals to 4, but got size ", input_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_size[0]; int channels = input_size[1]; int input_height = input_size[2]; int input_width = input_size[3]; upsample_2d_shape_check( Tensor(), grad_output_, nbatch, channels, input_height, input_width, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); 
grad_input.resize_({nbatch, channels, input_height, input_width}); if (grad_input.numel() == 0) { return; } // upsample_2d_shape_check makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest2d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); const float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height); const float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width); upsample_nearest2d_backward_out_frame<scalar_t, accscalar_t> <<<gdim, bdim, 0, stream>>>( odata, nbatch, channels, output_height, output_width, input_height, input_width, idata, height_scale, width_scale); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace Tensor& upsample_nearest2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { upsample_nearest2d_out_cuda_template(output, input, output_size, scales_h, scales_w); return output; } Tensor upsample_nearest2d_cuda(const Tensor& input, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_nearest2d_out_cuda_template(output, input, output_size, scales_h, scales_w); return output; } Tensor& upsample_nearest2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_h, scales_w); return grad_input; } Tensor upsample_nearest2d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_h, scales_w); return grad_input; } using at::native::upsample::compute_output_size; using at::native::upsample_cuda::get_scale_value; Tensor upsample_nearest2d_cuda( const Tensor& input, c10::optional<IntArrayRef> output_size, c10::optional<ArrayRef<double>> scale_factors) { auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); upsample_nearest2d_out_cuda_template(output, input, osize, scale_h, scale_w); return output; } Tensor upsample_nearest2d_backward_cuda( const Tensor& grad_output, c10::optional<IntArrayRef> output_size, IntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors) { auto osize = compute_output_size(input_size, output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w 
= get_scale_value(scale_factors, 1); auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, osize, input_size, scale_h, scale_w); return grad_input; } } // namespace native } // namespace at
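Both upsample kernels in the files above map each output coordinate back to a source coordinate via nearest_neighbor_compute_source_index, which lives in UpSample.cuh and is not shown in this record. A simplified stand-in that matches how the kernels use it (scale the destination index, truncate, and clamp to the input extent) might look like the sketch below; the function name is invented for illustration and this is not the ATen implementation:

#include <math.h>

// Map an output pixel index to its nearest source pixel for a given scale
// (scale is roughly input_size / output_size). Clamping keeps the index in range.
__host__ __device__ inline int nearest_source_index(float scale, int dst_index, int input_size)
{
    int src_index = static_cast<int>(floorf(dst_index * scale));
    return src_index < input_size - 1 ? src_index : input_size - 1;
}

// Example: upsampling a width of 4 to 8 (scale = 0.5) maps output columns
// 0..7 onto source columns 0,0,1,1,2,2,3,3.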
452e9884c5507298e481f1862eaf7925ddcb3be4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Based on the work of Andrew Krepps
#include <chrono>
#include <fstream>
#include <random>
#include <stdio.h>
#include <string>

// Adds the elements of array_a and array_b at the global (block + thread) index, storing the sum in array_results
__global__ void add_arrays( const int* const array_a, const int* const array_b, int* const array_results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    array_results[index] = array_a[index] + array_b[index];
}

// Subtracts the element of array_b from the element of array_a at the global (block + thread) index, storing the difference in array_results
__global__ void sub_arrays( const int* const array_a, const int* const array_b, int* const array_results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    array_results[index] = array_a[index] - array_b[index];
}

// Multiplies the elements of array_a and array_b at the global (block + thread) index, storing the product in array_results
__global__ void mult_arrays( const int* const array_a, const int* const array_b, int* const array_results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    array_results[index] = array_a[index] * array_b[index];
}

// Takes the element of array_a modulo the element of array_b at the global (block + thread) index, storing the remainder in array_results
__global__ void mod_arrays( const int* const array_a, const int* const array_b, int* const array_results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    array_results[index] = array_a[index] % array_b[index];
}

int main(int argc, char** argv)
{
    // read command line arguments
    int totalThreads = 512;
    int blockSize = 256;
    bool outputResults = false;
    std::string outputName;

    if (argc >= 2) {
        totalThreads = atoi(argv[1]);
    }
    if (argc >= 3) {
        blockSize = atoi(argv[2]);
    }
    if (argc >= 4) {
        outputResults = true;
        outputName = argv[3];
    }

    int numBlocks = totalThreads/blockSize;

    // validate command line arguments
    if (totalThreads % blockSize != 0) {
        ++numBlocks;
        totalThreads = numBlocks*blockSize;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", totalThreads);
    }

    // Used for random number generation; the lower bound of 1 keeps array_b
    // free of zeros, which would otherwise cause a modulo-by-zero in mod_arrays
    std::default_random_engine generator;
    std::uniform_int_distribution<int> distribution(1,3);

    // Device variables
    int* d_array_a;
    int* d_array_b;
    int* d_add_results;
    int* d_sub_results;
    int* d_mult_results;
    int* d_mod_results;

    // Host Variables
    int array_a[totalThreads];
    int array_b[totalThreads];
    int add_results[totalThreads];
    int sub_results[totalThreads];
    int mult_results[totalThreads];
    int mod_results[totalThreads];

    // Generate values for arrays.
    for( int i = 0; i < totalThreads; i++ )
    {
        array_a[i] = i;
        array_b[i] = distribution( generator );
    }

    auto array_size = totalThreads * sizeof(int);

    // Malloc GPU arrays
    hipMalloc((void **)&d_array_a, array_size);
    hipMalloc((void **)&d_array_b, array_size);
    hipMalloc((void **)&d_add_results, array_size);
    hipMalloc((void **)&d_sub_results, array_size);
    hipMalloc((void **)&d_mult_results, array_size);
    hipMalloc((void **)&d_mod_results, array_size);

    // Copy array values to Device
    hipMemcpy( d_array_a, array_a, array_size, hipMemcpyHostToDevice );
    hipMemcpy( d_array_b, array_b, array_size, hipMemcpyHostToDevice );

    // Execute the arithmetic kernels. Note that kernel launches are
    // asynchronous, so these wall-clock timings mostly capture launch overhead.
    auto start = std::chrono::high_resolution_clock::now();
    hipLaunchKernelGGL(( add_arrays), dim3(numBlocks), dim3(blockSize), 0, 0, d_array_a, d_array_b, d_add_results);
    auto stop = std::chrono::high_resolution_clock::now();
    auto add_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    start = std::chrono::high_resolution_clock::now();
    hipLaunchKernelGGL(( sub_arrays), dim3(numBlocks), dim3(blockSize), 0, 0, d_array_a, d_array_b, d_sub_results);
    stop = std::chrono::high_resolution_clock::now();
    auto sub_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    start = std::chrono::high_resolution_clock::now();
    hipLaunchKernelGGL(( mult_arrays), dim3(numBlocks), dim3(blockSize), 0, 0, d_array_a, d_array_b, d_mult_results);
    stop = std::chrono::high_resolution_clock::now();
    auto mult_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    start = std::chrono::high_resolution_clock::now();
    hipLaunchKernelGGL(( mod_arrays), dim3(numBlocks), dim3(blockSize), 0, 0, d_array_a, d_array_b, d_mod_results);
    stop = std::chrono::high_resolution_clock::now();
    auto mod_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();

    // Copy results to host
    hipMemcpy( &add_results, d_add_results, array_size, hipMemcpyDeviceToHost);
    hipMemcpy( &sub_results, d_sub_results, array_size, hipMemcpyDeviceToHost);
    hipMemcpy( &mult_results, d_mult_results, array_size, hipMemcpyDeviceToHost);
    hipMemcpy( &mod_results, d_mod_results, array_size, hipMemcpyDeviceToHost);

    // cleanup
    hipFree(d_array_a);
    hipFree(d_array_b);
    hipFree(d_add_results);
    hipFree(d_sub_results);
    hipFree(d_mult_results);
    hipFree(d_mod_results);

    printf("Results with Thread Count: %d and Block Size: %d\n", totalThreads, blockSize);
    printf("Add Time nanoseconds:\t %ld\n", add_time);
    printf("Sub Time nanoseconds:\t %ld\n", sub_time);
    printf("Mult Time nanoseconds:\t %ld\n", mult_time);
    printf("Mod Time nanoseconds:\t %ld\n", mod_time);

    if (outputResults)
    {
        std::ofstream stream(outputName);
        if (stream.is_open())
        {
            stream << "Results with Thread Count: " << totalThreads << " and Block Size: " << blockSize << "\n";
            stream << "Add Time nanoseconds:\t" << add_time << "\n";
            stream << "Sub Time nanoseconds:\t" << sub_time << "\n";
            stream << "Mult Time nanoseconds:\t" << mult_time << "\n";
            stream << "Mod Time nanoseconds:\t" << mod_time << "\n";

            stream << "Add Results:\n";
            for( int i = 0; i < totalThreads; i++ )
            {
                stream << "A(" << array_a[i] << ") + B(" << array_b[i] << ") = " << add_results[i] << "\n";
            }

            stream << "\n\nSub Results:\n";
            for( int i = 0; i < totalThreads; i++ )
            {
                stream << "A(" << array_a[i] << ") - B(" << array_b[i] << ") = " << sub_results[i] << "\n";
            }

            stream << "\n\nMult Results:\n";
            for( int i = 0; i < totalThreads; i++ )
            {
                stream << "A(" << array_a[i] << ") * B(" << array_b[i] << ") = " << mult_results[i] << "\n";
            }

            stream << "\n\nMod Results:\n";
            for( int i = 0; i < totalThreads; i++ )
            {
                stream << "A(" << array_a[i] << ") % B(" << array_b[i] << ") = " << mod_results[i] << "\n";
            }
        }
        else{
            printf("FILE NOT OPEN?\n");
        }
        stream.close();
    }

    printf("\n");
}
452e9884c5507298e481f1862eaf7925ddcb3be4.cu
//Based on the work of Andrew Krepps #include <chrono> #include <fstream> #include <random> #include <stdio.h> #include <string> // Uses the GPU to add the block + thread index in array_a to array_b to array_results __global__ void add_arrays( const int* const array_a, const int* const array_b, int* const array_results) { int index = threadIdx.x + blockIdx.x * blockDim.x; array_results[index] = array_a[index] + array_b[index]; } // Uses the GPU to subtract the block + thread index in array_b from array_a to array_results __global__ void sub_arrays( const int* const array_a, const int* const array_b, int* const array_results) { int index = threadIdx.x + blockIdx.x * blockDim.x; array_results[index] = array_a[index] - array_b[index]; } // Uses the GPU to multiply the block + thread index in array_a by array_b to array_results __global__ void mult_arrays( const int* const array_a, const int* const array_b, int* const array_results) { int index = threadIdx.x + blockIdx.x * blockDim.x; array_results[index] = array_a[index] * array_b[index]; } // Uses the GPU to mudulot the block + thread index in array_a by array_b to array_results __global__ void mod_arrays( const int* const array_a, const int* const array_b, int* const array_results) { int index = threadIdx.x + blockIdx.x * blockDim.x; array_results[index] = array_a[index] % array_b[index]; } int main(int argc, char** argv) { // read command line arguments int totalThreads = 512; int blockSize = 256; bool outputResults = false; std::string outputName; if (argc >= 2) { totalThreads = atoi(argv[1]); } if (argc >= 3) { blockSize = atoi(argv[2]); } if (argc >= 4) { outputResults = true; outputName = argv[3]; } int numBlocks = totalThreads/blockSize; // validate command line arguments if (totalThreads % blockSize != 0) { ++numBlocks; totalThreads = numBlocks*blockSize; printf("Warning: Total thread count is not evenly divisible by the block size\n"); printf("The total number of threads will be rounded up to %d\n", totalThreads); } // Used for random number generation std::default_random_engine generator; std::uniform_int_distribution<int> distribution(0,3); // Device variables int* d_array_a; int* d_array_b; int* d_add_results; int* d_sub_results; int* d_mult_results; int* d_mod_results; // Host Variables int array_a[totalThreads]; int array_b[totalThreads]; int add_results[totalThreads]; int sub_results[totalThreads]; int mult_results[totalThreads]; int mod_results[totalThreads]; // Generate values for arrays. 
for( int i = 0; i < totalThreads; i++ ) { array_a[i] = i; array_b[i] = distribution( generator ); } auto array_size = totalThreads * sizeof(int); // Malloc GPU arrays cudaMalloc((void **)&d_array_a, array_size); cudaMalloc((void **)&d_array_b, array_size); cudaMalloc((void **)&d_add_results, array_size); cudaMalloc((void **)&d_sub_results, array_size); cudaMalloc((void **)&d_mult_results, array_size); cudaMalloc((void **)&d_mod_results, array_size); // Copy array values to Device cudaMemcpy( d_array_a, array_a, array_size, cudaMemcpyHostToDevice ); cudaMemcpy( d_array_b, array_b, array_size, cudaMemcpyHostToDevice ); // Execute assignment operations auto start = std::chrono::high_resolution_clock::now(); add_arrays<<<numBlocks, blockSize>>>(d_array_a, d_array_b, d_add_results); auto stop = std::chrono::high_resolution_clock::now(); auto add_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count(); start = std::chrono::high_resolution_clock::now(); sub_arrays<<<numBlocks, blockSize>>>(d_array_a, d_array_b, d_sub_results); stop = std::chrono::high_resolution_clock::now(); auto sub_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count(); start = std::chrono::high_resolution_clock::now(); mult_arrays<<<numBlocks, blockSize>>>(d_array_a, d_array_b, d_mult_results); stop = std::chrono::high_resolution_clock::now(); auto mult_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count(); start = std::chrono::high_resolution_clock::now(); mod_arrays<<<numBlocks, blockSize>>>(d_array_a, d_array_b, d_mod_results); stop = std::chrono::high_resolution_clock::now(); auto mod_time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count(); // Copy results to host cudaMemcpy( &add_results, d_add_results, array_size, cudaMemcpyDeviceToHost); cudaMemcpy( &sub_results, d_sub_results, array_size, cudaMemcpyDeviceToHost); cudaMemcpy( &mult_results, d_mult_results, array_size, cudaMemcpyDeviceToHost); cudaMemcpy( &mod_results, d_mod_results, array_size, cudaMemcpyDeviceToHost); // cleanup cudaFree(d_array_a); cudaFree(d_array_b); cudaFree(d_add_results); cudaFree(d_sub_results); cudaFree(d_mult_results); cudaFree(d_mod_results); printf("Results with Thread Count: %d and Block Size: %d\n", totalThreads, blockSize); printf("Add Time nanoseconds:\t %ld\n", add_time); printf("Sub Time nanoseconds:\t %ld\n", sub_time); printf("Mult Time nanoseconds:\t %ld\n", mult_time); printf("Mod Time nanoseconds:\t %ld\n", mod_time); if (outputResults) { std::ofstream stream(outputName); if (stream.is_open()) { stream << "Results with Thread Count: " << totalThreads << " and Block Size: " << blockSize << "\n"; stream << "Add Time nanoseconds:\t" << add_time << "\n"; stream << "Sub Time nanoseconds:\t" << sub_time << "\n"; stream << "Mult Time nanoseconds:\t" << mult_time << "\n"; stream << "Mod Time nanoseconds:\t" << mod_time << "\n"; stream << "Add Results:\n"; for( int i = 0; i < totalThreads; i++ ) { stream << "A(" << array_a[i] << ") + B(" << array_b[i] << ") = " << add_results[i] << "\n"; } stream << "\n\nSub Results:\n"; for( int i = 0; i < totalThreads; i++ ) { stream << "A(" << array_a[i] << ") - B(" << array_b[i] << ") = " << sub_results[i] << "\n"; } stream << "\n\nMult Results:\n"; for( int i = 0; i < totalThreads; i++ ) { stream << "A(" << array_a[i] << ") * B(" << array_b[i] << ") = " << mult_results[i] << "\n"; } stream << "\n\nMult Results:\n"; for( int i = 0; i < totalThreads; i++ ) { stream << "A(" << array_a[i] << ") % 
B(" << array_b[i] << ") = " << mod_results[i] << "\n"; } } else{ printf("FILE NOT OPEN?\n"); } stream.close(); } printf("\n"); }
b7ff28248031890c2c7ffbd1912a51320d3f5a22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //---------------------------------*-CUDA-*----------------------------------// // Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file NumericLimits.test.cu //---------------------------------------------------------------------------// #include "NumericLimits.test.hh" #include "base/Assert.hh" #include "base/KernelParamCalculator.cuda.hh" #include "base/NumericLimits.hh" namespace celeritas_test { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// template<class T> __global__ void nl_test_kernel(NLTestOutput<T>* data) { using limits_t = celeritas::numeric_limits<T>; unsigned int local_thread_id = celeritas::KernelParamCalculator::thread_id().get(); if (local_thread_id == 0) { data->eps = limits_t::epsilon(); } else if (local_thread_id == 1) { data->nan = limits_t::quiet_NaN(); } else if (local_thread_id == 2) { data->inf = limits_t::infinity(); } else if (local_thread_id == 3) { data->max = limits_t::max(); } } //---------------------------------------------------------------------------// // TESTING INTERFACE //---------------------------------------------------------------------------// //! Run on device and return results template<class T> NLTestOutput<T> nl_test() { // Allocate output data NLTestOutput<T>* result_device; CELER_CUDA_CALL(hipMalloc(&result_device, sizeof(NLTestOutput<T>))); celeritas::KernelParamCalculator calc_launch_params; auto params = calc_launch_params(3); hipLaunchKernelGGL(( nl_test_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0, result_device); CELER_CUDA_CALL(hipDeviceSynchronize()); // Copy to host NLTestOutput<T> result; CELER_CUDA_CALL(hipMemcpy(&result, result_device, sizeof(NLTestOutput<T>), hipMemcpyDeviceToHost)); CELER_CUDA_CALL(hipFree(result_device)); return result; } //---------------------------------------------------------------------------// // EXPLICIT INSTANTIATION //---------------------------------------------------------------------------// template NLTestOutput<float> nl_test<float>(); template NLTestOutput<double> nl_test<double>(); //---------------------------------------------------------------------------// } // namespace celeritas_test
b7ff28248031890c2c7ffbd1912a51320d3f5a22.cu
//---------------------------------*-CUDA-*----------------------------------// // Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file NumericLimits.test.cu //---------------------------------------------------------------------------// #include "NumericLimits.test.hh" #include "base/Assert.hh" #include "base/KernelParamCalculator.cuda.hh" #include "base/NumericLimits.hh" namespace celeritas_test { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// template<class T> __global__ void nl_test_kernel(NLTestOutput<T>* data) { using limits_t = celeritas::numeric_limits<T>; unsigned int local_thread_id = celeritas::KernelParamCalculator::thread_id().get(); if (local_thread_id == 0) { data->eps = limits_t::epsilon(); } else if (local_thread_id == 1) { data->nan = limits_t::quiet_NaN(); } else if (local_thread_id == 2) { data->inf = limits_t::infinity(); } else if (local_thread_id == 3) { data->max = limits_t::max(); } } //---------------------------------------------------------------------------// // TESTING INTERFACE //---------------------------------------------------------------------------// //! Run on device and return results template<class T> NLTestOutput<T> nl_test() { // Allocate output data NLTestOutput<T>* result_device; CELER_CUDA_CALL(cudaMalloc(&result_device, sizeof(NLTestOutput<T>))); celeritas::KernelParamCalculator calc_launch_params; auto params = calc_launch_params(3); nl_test_kernel<<<params.grid_size, params.block_size>>>(result_device); CELER_CUDA_CALL(cudaDeviceSynchronize()); // Copy to host NLTestOutput<T> result; CELER_CUDA_CALL(cudaMemcpy(&result, result_device, sizeof(NLTestOutput<T>), cudaMemcpyDeviceToHost)); CELER_CUDA_CALL(cudaFree(result_device)); return result; } //---------------------------------------------------------------------------// // EXPLICIT INSTANTIATION //---------------------------------------------------------------------------// template NLTestOutput<float> nl_test<float>(); template NLTestOutput<double> nl_test<double>(); //---------------------------------------------------------------------------// } // namespace celeritas_test
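Outside the Celeritas test harness, the same device-side limits can be sanity-checked against the host with a few lines of plain CUDA. The sketch below uses the FLT_* macros and INFINITY, which are valid in device code, rather than celeritas::numeric_limits, so it only illustrates the idea of the test:

#include <cfloat>
#include <cmath>
#include <cstdio>
#include <limits>
#include <cuda_runtime.h>

struct DeviceLimits { float eps, inf, max; };

// One thread is enough to fill the three scalar fields.
__global__ void fill_limits(DeviceLimits* out)
{
    if (threadIdx.x == 0 && blockIdx.x == 0)
    {
        out->eps = FLT_EPSILON;
        out->inf = INFINITY;
        out->max = FLT_MAX;
    }
}

int main()
{
    DeviceLimits* d_limits = nullptr;
    cudaMalloc(&d_limits, sizeof(DeviceLimits));
    fill_limits<<<1, 1>>>(d_limits);

    DeviceLimits h{};
    cudaMemcpy(&h, d_limits, sizeof(DeviceLimits), cudaMemcpyDeviceToHost);
    cudaFree(d_limits);

    printf("eps matches host: %d\n", h.eps == std::numeric_limits<float>::epsilon());
    printf("inf matches host: %d\n", h.inf == std::numeric_limits<float>::infinity());
    printf("max matches host: %d\n", h.max == std::numeric_limits<float>::max());
    return 0;
}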
48eab63c465d2f6709085d5d0a62c3cccbc2ecf6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdint.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <histogram.h> #include <cuda_gl_interop.h> texture<uchar2, 2, hipReadModeElementType> tex_y; texture<uchar4, 2, hipReadModeElementType> tex_uv; __device__ int dev_max[3]; __device__ inline uint8_t red(uint8_t y, uint8_t v) { return (uint8_t) (y+1.5958*v); } __device__ inline uint8_t green(uint8_t y, uint8_t u, uint8_t v) { return (uint8_t) (y-0.39173*u-0.81290*v); } __device__ inline uint8_t blue(uint8_t y, uint8_t u) { return (uint8_t) (y+2.017*u); } __global__ void comp_histogram(GLint* hist_r, GLint* hist_g, GLint* hist_b, int w, int h) { int x, y; uint8_t r_c, g_c, b_c; uint8_t y_c, u_c, v_c; __shared__ int temp_hist_r[256]; __shared__ int temp_hist_g[256]; __shared__ int temp_hist_b[256]; if (blockIdx.x == 0) { hist_r[1+(2*threadIdx.x)] = 0; hist_g[1+(2*threadIdx.x)] = 0; hist_b[1+(2*threadIdx.x)] = 0; hist_r[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x; hist_g[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x; hist_b[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x; } __syncthreads(); temp_hist_r[threadIdx.x] = 0; temp_hist_g[threadIdx.x] = 0; temp_hist_b[threadIdx.x] = 0; __syncthreads(); y = blockIdx.x; while (y < h) { x = threadIdx.x; while (x < w) { y_c = tex2D(tex_y, x, y).x; u_c = tex2D(tex_uv, x/2, y).y; v_c = tex2D(tex_uv, x/2, y).w; r_c = red(y_c, v_c); g_c = green(y_c, u_c, v_c); b_c = blue(y_c, u_c); atomicAdd(&temp_hist_r[r_c], 1); atomicAdd(&temp_hist_g[g_c], 1); atomicAdd(&temp_hist_b[b_c], 1); x += blockDim.x; } y += gridDim.x; } __syncthreads(); atomicAdd(&hist_r[1+(2*threadIdx.x)], temp_hist_r[threadIdx.x]); atomicAdd(&hist_g[1+(2*threadIdx.x)], temp_hist_g[threadIdx.x]); atomicAdd(&hist_b[1+(2*threadIdx.x)], temp_hist_b[threadIdx.x]); } __global__ void get_max(GLint* hist_r, GLint* hist_g, GLint* hist_b) { int i; GLint* hist; __shared__ GLint max_hist[3]; hist = (threadIdx.x == 0)?hist_r:((threadIdx.x == 1)?hist_g:hist_b); max_hist[threadIdx.x] = 0; __syncthreads(); for (i = 1; i < 256; i++) { if (hist[1+(2*i)] > max_hist[threadIdx.x]) { max_hist[threadIdx.x] = hist[1+(2*i)]; } } __syncthreads(); dev_max[threadIdx.x] = max_hist[threadIdx.x]; } void print_cuda_device_info(); void compute_histogram(unsigned int* texture, unsigned int* hist_obj, int* hist_max, int width, int height) { hipError_t err; GLint* dev_hist[3] = {0, 0, 0}; hipArray* array[3] = {0, 0, 0}; cudaGraphicsResource* res[3] = {0, 0, 0}; for (int i = 0; i < 3; i++) { hipGLRegisterBufferObject(hist_obj[i]); hipGLMapBufferObject__((void **)&dev_hist[i], hist_obj[i]); } err = hipGraphicsGLRegisterImage(&res[0], texture[0], GL_TEXTURE_2D, hipGraphicsRegisterFlagsReadOnly); if (err != hipSuccess) { printf("hipGraphicsGLRegisterImage Failed: %s", hipGetErrorString(hipGetLastError())); exit(0); } err = hipGraphicsGLRegisterImage(&res[1], texture[1], GL_TEXTURE_2D, hipGraphicsRegisterFlagsReadOnly); if (err != hipSuccess) { printf("hipGraphicsGLRegisterImage Failed: %s", hipGetErrorString(hipGetLastError())); exit(0); } hipGraphicsMapResources(2, res); for (int i = 0; i < 2; i++) { err = hipGraphicsSubResourceGetMappedArray(&array[i], res[i], 0, 0); if (err != hipSuccess) { printf("hipGraphicsSubResourceGetMappedArray Failed: %s", hipGetErrorString(hipGetLastError())); exit(0); } } hipChannelFormatDesc y_chan_desc = hipCreateChannelDesc<uchar2>(); if (hipBindTextureToArray(&tex_y, array[0], &y_chan_desc) != hipSuccess) { 
printf("Failed to bind y texture: %s\n", hipGetErrorString(hipGetLastError())); exit(0); } hipChannelFormatDesc uv_chan_desc = hipCreateChannelDesc<uchar4>(); if (hipBindTextureToArray(&tex_uv, array[1], &uv_chan_desc) != hipSuccess) { printf("Failed to bind uv texture: %s\n", hipGetErrorString(hipGetLastError())); exit(0); } hipLaunchKernelGGL(( comp_histogram), dim3(64), dim3(256), 0, 0, dev_hist[0], dev_hist[1], dev_hist[2], width, height); hipDeviceSynchronize(); hipLaunchKernelGGL(( get_max), dim3(1), dim3(3), 0, 0, dev_hist[0], dev_hist[1], dev_hist[2]); hipMemcpyFromSymbol(hist_max, dev_max, 3*sizeof(int)); hipUnbindTexture(&tex_y); hipUnbindTexture(&tex_uv); hipGraphicsUnmapResources(2, res); hipGraphicsUnregisterResource(res[0]); hipGraphicsUnregisterResource(res[1]); for (int i = 0; i < 3; i++) { hipGLUnmapBufferObject(hist_obj[i]); hipGLUnregisterBufferObject(hist_obj[i]); } } void print_cuda_device_info() { int count = 0; hipDeviceProp_t prop; hipGetDeviceCount(&count); printf("\nCUDA Device Count: %d", count); for (int i = 0; i < count; i++) { hipGetDeviceProperties(&prop, i); printf("\nDevice: %d", i); printf("\nName: %s", prop.name); printf("\nRevision: Major: %d, Minor: %d", prop.major, prop.minor); printf("\nWarp Size: %d", prop.warpSize); printf("\nMemory Bus width: %d", prop.memoryBusWidth); printf("\nMemory Clock Rate: %d", prop.memoryClockRate); printf("\nConcurrent Kernels: %d", prop.concurrentKernels); printf("\nMultiprocessor count: %d", prop.multiProcessorCount); printf("\nTotal Global Memory: %d", (int)prop.totalGlobalMem); printf("\nTotal Constant Memory: %d", (int)prop.totalConstMem); printf("\nShared Memory per Block: %d", (int)prop.sharedMemPerBlock); printf("\nMax grid dimensions: (%d, %d, %d)", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("\nMax threads per block: %d", prop.maxThreadsPerBlock); printf("\nMax threads dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); } }
48eab63c465d2f6709085d5d0a62c3cccbc2ecf6.cu
#include <stdio.h> #include <stdint.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <histogram.h> #include <cuda_gl_interop.h> texture<uchar2, 2, cudaReadModeElementType> tex_y; texture<uchar4, 2, cudaReadModeElementType> tex_uv; __device__ int dev_max[3]; __device__ inline uint8_t red(uint8_t y, uint8_t v) { return (uint8_t) (y+1.5958*v); } __device__ inline uint8_t green(uint8_t y, uint8_t u, uint8_t v) { return (uint8_t) (y-0.39173*u-0.81290*v); } __device__ inline uint8_t blue(uint8_t y, uint8_t u) { return (uint8_t) (y+2.017*u); } __global__ void comp_histogram(GLint* hist_r, GLint* hist_g, GLint* hist_b, int w, int h) { int x, y; uint8_t r_c, g_c, b_c; uint8_t y_c, u_c, v_c; __shared__ int temp_hist_r[256]; __shared__ int temp_hist_g[256]; __shared__ int temp_hist_b[256]; if (blockIdx.x == 0) { hist_r[1+(2*threadIdx.x)] = 0; hist_g[1+(2*threadIdx.x)] = 0; hist_b[1+(2*threadIdx.x)] = 0; hist_r[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x; hist_g[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x; hist_b[(2*threadIdx.x)] = (threadIdx.x == 0)?0:threadIdx.x; } __syncthreads(); temp_hist_r[threadIdx.x] = 0; temp_hist_g[threadIdx.x] = 0; temp_hist_b[threadIdx.x] = 0; __syncthreads(); y = blockIdx.x; while (y < h) { x = threadIdx.x; while (x < w) { y_c = tex2D(tex_y, x, y).x; u_c = tex2D(tex_uv, x/2, y).y; v_c = tex2D(tex_uv, x/2, y).w; r_c = red(y_c, v_c); g_c = green(y_c, u_c, v_c); b_c = blue(y_c, u_c); atomicAdd(&temp_hist_r[r_c], 1); atomicAdd(&temp_hist_g[g_c], 1); atomicAdd(&temp_hist_b[b_c], 1); x += blockDim.x; } y += gridDim.x; } __syncthreads(); atomicAdd(&hist_r[1+(2*threadIdx.x)], temp_hist_r[threadIdx.x]); atomicAdd(&hist_g[1+(2*threadIdx.x)], temp_hist_g[threadIdx.x]); atomicAdd(&hist_b[1+(2*threadIdx.x)], temp_hist_b[threadIdx.x]); } __global__ void get_max(GLint* hist_r, GLint* hist_g, GLint* hist_b) { int i; GLint* hist; __shared__ GLint max_hist[3]; hist = (threadIdx.x == 0)?hist_r:((threadIdx.x == 1)?hist_g:hist_b); max_hist[threadIdx.x] = 0; __syncthreads(); for (i = 1; i < 256; i++) { if (hist[1+(2*i)] > max_hist[threadIdx.x]) { max_hist[threadIdx.x] = hist[1+(2*i)]; } } __syncthreads(); dev_max[threadIdx.x] = max_hist[threadIdx.x]; } void print_cuda_device_info(); void compute_histogram(unsigned int* texture, unsigned int* hist_obj, int* hist_max, int width, int height) { cudaError_t err; GLint* dev_hist[3] = {0, 0, 0}; cudaArray* array[3] = {0, 0, 0}; cudaGraphicsResource* res[3] = {0, 0, 0}; for (int i = 0; i < 3; i++) { cudaGLRegisterBufferObject(hist_obj[i]); cudaGLMapBufferObject((void **)&dev_hist[i], hist_obj[i]); } err = cudaGraphicsGLRegisterImage(&res[0], texture[0], GL_TEXTURE_2D, cudaGraphicsRegisterFlagsReadOnly); if (err != cudaSuccess) { printf("cudaGraphicsGLRegisterImage Failed: %s", cudaGetErrorString(cudaGetLastError())); exit(0); } err = cudaGraphicsGLRegisterImage(&res[1], texture[1], GL_TEXTURE_2D, cudaGraphicsRegisterFlagsReadOnly); if (err != cudaSuccess) { printf("cudaGraphicsGLRegisterImage Failed: %s", cudaGetErrorString(cudaGetLastError())); exit(0); } cudaGraphicsMapResources(2, res); for (int i = 0; i < 2; i++) { err = cudaGraphicsSubResourceGetMappedArray(&array[i], res[i], 0, 0); if (err != cudaSuccess) { printf("cudaGraphicsSubResourceGetMappedArray Failed: %s", cudaGetErrorString(cudaGetLastError())); exit(0); } } cudaChannelFormatDesc y_chan_desc = cudaCreateChannelDesc<uchar2>(); if (cudaBindTextureToArray(&tex_y, array[0], &y_chan_desc) != cudaSuccess) { printf("Failed to bind y texture: %s\n", 
cudaGetErrorString(cudaGetLastError())); exit(0); } cudaChannelFormatDesc uv_chan_desc = cudaCreateChannelDesc<uchar4>(); if (cudaBindTextureToArray(&tex_uv, array[1], &uv_chan_desc) != cudaSuccess) { printf("Failed to bind uv texture: %s\n", cudaGetErrorString(cudaGetLastError())); exit(0); } comp_histogram<<<64, 256>>>(dev_hist[0], dev_hist[1], dev_hist[2], width, height); cudaThreadSynchronize(); get_max<<<1, 3>>>(dev_hist[0], dev_hist[1], dev_hist[2]); cudaMemcpyFromSymbol(hist_max, dev_max, 3*sizeof(int)); cudaUnbindTexture(&tex_y); cudaUnbindTexture(&tex_uv); cudaGraphicsUnmapResources(2, res); cudaGraphicsUnregisterResource(res[0]); cudaGraphicsUnregisterResource(res[1]); for (int i = 0; i < 3; i++) { cudaGLUnmapBufferObject(hist_obj[i]); cudaGLUnregisterBufferObject(hist_obj[i]); } } void print_cuda_device_info() { int count = 0; cudaDeviceProp prop; cudaGetDeviceCount(&count); printf("\nCUDA Device Count: %d", count); for (int i = 0; i < count; i++) { cudaGetDeviceProperties(&prop, i); printf("\nDevice: %d", i); printf("\nName: %s", prop.name); printf("\nRevision: Major: %d, Minor: %d", prop.major, prop.minor); printf("\nWarp Size: %d", prop.warpSize); printf("\nMemory Bus width: %d", prop.memoryBusWidth); printf("\nMemory Clock Rate: %d", prop.memoryClockRate); printf("\nConcurrent Kernels: %d", prop.concurrentKernels); printf("\nMultiprocessor count: %d", prop.multiProcessorCount); printf("\nTotal Global Memory: %d", (int)prop.totalGlobalMem); printf("\nTotal Constant Memory: %d", (int)prop.totalConstMem); printf("\nShared Memory per Block: %d", (int)prop.sharedMemPerBlock); printf("\nMax grid dimensions: (%d, %d, %d)", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("\nMax threads per block: %d", prop.maxThreadsPerBlock); printf("\nMax threads dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); } }
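This file relies on the texture-reference API (global texture<> variables, cudaBindTextureToArray, cudaUnbindTexture), which is deprecated and was removed in CUDA 12, and on cudaThreadSynchronize, which is long deprecated in favour of cudaDeviceSynchronize, so it will not build on current toolkits. A sketch of the modern replacement, creating a bindless texture object over the same cudaArray obtained from the graphics-interop mapping (function and variable names here are illustrative):

#include <cuda_runtime.h>

// Build a texture object over an existing cudaArray holding uchar2 texels (the Y plane).
cudaTextureObject_t make_y_texture(cudaArray_t y_array)
{
    cudaResourceDesc res_desc = {};
    res_desc.resType = cudaResourceTypeArray;
    res_desc.res.array.array = y_array;

    cudaTextureDesc tex_desc = {};
    tex_desc.addressMode[0] = cudaAddressModeClamp;
    tex_desc.addressMode[1] = cudaAddressModeClamp;
    tex_desc.filterMode     = cudaFilterModePoint;
    tex_desc.readMode       = cudaReadModeElementType;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &res_desc, &tex_desc, nullptr);
    return tex;   // passed to the kernel as an ordinary argument
}

// Inside the kernel the lookup becomes:
//   uint8_t y_c = tex2D<uchar2>(tex_y_obj, x, y).x;
// and the object is released with cudaDestroyTextureObject() once the kernel is done.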
8c70c2cb8de671d97195d137281522061a8843bc.hip
// !!! This is a file automatically generated by hipify!!! // ************************************************************************** // // PARALUTION www.paralution.com // // Copyright (C) 2015 PARALUTION Labs UG (haftungsbeschrnkt) & Co. KG // Am Hasensprung 6, 76571 Gaggenau // Handelsregister: Amtsgericht Mannheim, HRA 706051 // Vertreten durch: // PARALUTION Labs Verwaltungs UG (haftungsbeschrnkt) // Am Hasensprung 6, 76571 Gaggenau // Handelsregister: Amtsgericht Mannheim, HRB 721277 // Geschftsfhrer: Dimitar Lukarski, Nico Trost // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // // ************************************************************************** // PARALUTION version 1.1.0 #include "../../utils/def.hpp" #include "gpu_matrix_csr.hpp" #include "gpu_matrix_mcsr.hpp" #include "gpu_vector.hpp" #include "../host/host_matrix_mcsr.hpp" #include "../base_matrix.hpp" #include "../base_vector.hpp" #include "../backend_manager.hpp" #include "../../utils/log.hpp" #include "gpu_utils.hpp" #include "cuda_kernels_general.hpp" #include "cuda_kernels_mcsr.hpp" #include "gpu_allocate_free.hpp" #include "../matrix_formats_ind.hpp" #include <hip/hip_runtime.h> #include <cusparse_v2.h> namespace paralution { template <typename ValueType> GPUAcceleratorMatrixMCSR<ValueType>::GPUAcceleratorMatrixMCSR() { // no default constructors LOG_INFO("no default constructor"); FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixMCSR<ValueType>::GPUAcceleratorMatrixMCSR(const Paralution_Backend_Descriptor local_backend) { LOG_DEBUG(this, "GPUAcceleratorMatrixMCSR::GPUAcceleratorMatrixMCSR()", "constructor with local_backend"); this->mat_.row_offset = NULL; this->mat_.col = NULL; this->mat_.val = NULL; this->set_backend(local_backend); CHECK_CUDA_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixMCSR<ValueType>::~GPUAcceleratorMatrixMCSR() { LOG_DEBUG(this, "GPUAcceleratorMatrixMCSR::~GPUAcceleratorMatrixMCSR()", "destructor"); this->Clear(); } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::info(void) const { LOG_INFO("GPUAcceleratorMatrixMCSR<ValueType>"); } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::AllocateMCSR(const int nnz, const int nrow, const int ncol) { assert(nnz >= 0); assert(ncol >= 0); assert(nrow >= 0); if (this->get_nnz() > 0) this->Clear(); if (nnz > 0) { allocate_gpu(nrow+1, &this->mat_.row_offset); allocate_gpu(nnz, &this->mat_.col); allocate_gpu(nnz, &this->mat_.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nrow+1, mat_.row_offset); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nnz, mat_.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nnz, mat_.val); this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = nnz; } } template <typename ValueType> 
void GPUAcceleratorMatrixMCSR<ValueType>::SetDataPtrMCSR(int **row_offset, int **col, ValueType **val, const int nnz, const int nrow, const int ncol) { assert(*row_offset != NULL); assert(*col != NULL); assert(*val != NULL); assert(nnz > 0); assert(nrow > 0); assert(ncol > 0); this->Clear(); this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = nnz; hipDeviceSynchronize(); this->mat_.row_offset = *row_offset; this->mat_.col = *col; this->mat_.val = *val; } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::LeaveDataPtrMCSR(int **row_offset, int **col, ValueType **val) { assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); assert(this->get_nnz() > 0); hipDeviceSynchronize(); // see free_host function for details *row_offset = this->mat_.row_offset; *col = this->mat_.col; *val = this->mat_.val; this->mat_.row_offset = NULL; this->mat_.col = NULL; this->mat_.val = NULL; this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::Clear() { if (this->get_nnz() > 0) { free_gpu(&this->mat_.row_offset); free_gpu(&this->mat_.col); free_gpu(&this->mat_.val); this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) { const HostMatrixMCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixMCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateMCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); hipMemcpy(this->mat_.row_offset, // dst cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.col, // dst cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.val, // dst cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const { HostMatrixMCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixMCSR<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateMCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); hipMemcpy(cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { LOG_INFO("Error 
unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixMCSR<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateMCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); hipMemcpy(this->mat_.row_offset, // dst gpu_cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.col, // dst gpu_cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.val, // dst gpu_cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHost(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixMCSR<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixMCSR<ValueType>*> (dst)) != NULL) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateMCSR(dst->get_nnz(), dst->get_nrow(), dst->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); hipMemcpy(gpu_cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { //GPU to CPU if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) { this->CopyToHost(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) { const HostMatrixMCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixMCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateMCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == 
src.get_ncol()); hipMemcpyAsync(this->mat_.row_offset, // dst cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.col, // dst cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.val, // dst cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const { HostMatrixMCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixMCSR<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateMCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); hipMemcpyAsync(cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixMCSR<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateMCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); hipMemcpy(this->mat_.row_offset, // dst gpu_cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.col, // dst gpu_cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.val, // dst gpu_cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHostAsync(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixMCSR<ValueType> *gpu_cast_mat; HostMatrix<ValueType> 
*host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixMCSR<ValueType>*> (dst)) != NULL) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateMCSR(dst->get_nnz(), dst->get_nrow(), dst->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); hipMemcpy(gpu_cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { //GPU to CPU if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) { this->CopyToHostAsync(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> bool GPUAcceleratorMatrixMCSR<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) { this->Clear(); // empty matrix is empty matrix if (mat.get_nnz() == 0) return true; const GPUAcceleratorMatrixMCSR<ValueType> *cast_mat_mcsr; if ((cast_mat_mcsr = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&mat)) != NULL) { this->CopyFrom(*cast_mat_mcsr); return true; } /* const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr; if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) { this->Clear(); FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_csr->get_nrow(); this->ncol_ = cast_mat_csr->get_ncol(); this->nnz_ = cast_mat_csr->get_nnz(); return true; } */ return false; } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in); GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out); assert(cast_in != NULL); assert(cast_out!= NULL); int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_mcsr_spmv_scalar<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in); GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out); assert(cast_in != NULL); assert(cast_out!= NULL); int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_mcsr_add_spmv_scalar<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, scalar, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } template class GPUAcceleratorMatrixMCSR<double>; template class GPUAcceleratorMatrixMCSR<float>; }
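One thing worth noting about the Async copy routines in this class: hipMemcpyAsync (cudaMemcpyAsync in the CUDA original) is issued without an explicit stream, so it goes to the default stream, and the host-side matrix storage is ordinary pageable memory, in which case the transfer is generally not truly asynchronous. Overlap with other work usually requires pinned host buffers and a non-default stream; a minimal sketch of that pattern, with illustrative names rather than PARALUTION's API:

#include <cuda_runtime.h>

// Upload n ints without blocking the host, assuming h_src was allocated with cudaMallocHost.
void async_upload(int* d_dst, const int* h_src, int n, cudaStream_t stream)
{
    cudaMemcpyAsync(d_dst, h_src, n * sizeof(int),
                    cudaMemcpyHostToDevice, stream);
    // Other streams (or the host) can keep working here; synchronize only
    // when the data is actually needed:
    //   cudaStreamSynchronize(stream);
}

// Pinned host allocation and cleanup:
//   int* h_src = nullptr;
//   cudaMallocHost(&h_src, n * sizeof(int));
//   ...
//   cudaFreeHost(h_src);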
8c70c2cb8de671d97195d137281522061a8843bc.cu
// ************************************************************************** // // PARALUTION www.paralution.com // // Copyright (C) 2015 PARALUTION Labs UG (haftungsbeschränkt) & Co. KG // Am Hasensprung 6, 76571 Gaggenau // Handelsregister: Amtsgericht Mannheim, HRA 706051 // Vertreten durch: // PARALUTION Labs Verwaltungs UG (haftungsbeschränkt) // Am Hasensprung 6, 76571 Gaggenau // Handelsregister: Amtsgericht Mannheim, HRB 721277 // Geschäftsführer: Dimitar Lukarski, Nico Trost // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // // ************************************************************************** // PARALUTION version 1.1.0 #include "../../utils/def.hpp" #include "gpu_matrix_csr.hpp" #include "gpu_matrix_mcsr.hpp" #include "gpu_vector.hpp" #include "../host/host_matrix_mcsr.hpp" #include "../base_matrix.hpp" #include "../base_vector.hpp" #include "../backend_manager.hpp" #include "../../utils/log.hpp" #include "gpu_utils.hpp" #include "cuda_kernels_general.hpp" #include "cuda_kernels_mcsr.hpp" #include "gpu_allocate_free.hpp" #include "../matrix_formats_ind.hpp" #include <cuda.h> #include <cusparse_v2.h> namespace paralution { template <typename ValueType> GPUAcceleratorMatrixMCSR<ValueType>::GPUAcceleratorMatrixMCSR() { // no default constructors LOG_INFO("no default constructor"); FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixMCSR<ValueType>::GPUAcceleratorMatrixMCSR(const Paralution_Backend_Descriptor local_backend) { LOG_DEBUG(this, "GPUAcceleratorMatrixMCSR::GPUAcceleratorMatrixMCSR()", "constructor with local_backend"); this->mat_.row_offset = NULL; this->mat_.col = NULL; this->mat_.val = NULL; this->set_backend(local_backend); CHECK_CUDA_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixMCSR<ValueType>::~GPUAcceleratorMatrixMCSR() { LOG_DEBUG(this, "GPUAcceleratorMatrixMCSR::~GPUAcceleratorMatrixMCSR()", "destructor"); this->Clear(); } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::info(void) const { LOG_INFO("GPUAcceleratorMatrixMCSR<ValueType>"); } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::AllocateMCSR(const int nnz, const int nrow, const int ncol) { assert(nnz >= 0); assert(ncol >= 0); assert(nrow >= 0); if (this->get_nnz() > 0) this->Clear(); if (nnz > 0) { allocate_gpu(nrow+1, &this->mat_.row_offset); allocate_gpu(nnz, &this->mat_.col); allocate_gpu(nnz, &this->mat_.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nrow+1, mat_.row_offset); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nnz, mat_.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, nnz, mat_.val); this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = nnz; } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::SetDataPtrMCSR(int 
**row_offset, int **col, ValueType **val, const int nnz, const int nrow, const int ncol) { assert(*row_offset != NULL); assert(*col != NULL); assert(*val != NULL); assert(nnz > 0); assert(nrow > 0); assert(ncol > 0); this->Clear(); this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = nnz; cudaDeviceSynchronize(); this->mat_.row_offset = *row_offset; this->mat_.col = *col; this->mat_.val = *val; } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::LeaveDataPtrMCSR(int **row_offset, int **col, ValueType **val) { assert(this->get_nrow() > 0); assert(this->get_ncol() > 0); assert(this->get_nnz() > 0); cudaDeviceSynchronize(); // see free_host function for details *row_offset = this->mat_.row_offset; *col = this->mat_.col; *val = this->mat_.val; this->mat_.row_offset = NULL; this->mat_.col = NULL; this->mat_.val = NULL; this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::Clear() { if (this->get_nnz() > 0) { free_gpu(&this->mat_.row_offset); free_gpu(&this->mat_.col); free_gpu(&this->mat_.val); this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) { const HostMatrixMCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixMCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateMCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); cudaMemcpy(this->mat_.row_offset, // dst cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.col, // dst cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.val, // dst cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const { HostMatrixMCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixMCSR<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateMCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); cudaMemcpy(cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); 
dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixMCSR<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateMCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); cudaMemcpy(this->mat_.row_offset, // dst gpu_cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.col, // dst gpu_cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.val, // dst gpu_cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHost(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixMCSR<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixMCSR<ValueType>*> (dst)) != NULL) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateMCSR(dst->get_nnz(), dst->get_nrow(), dst->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); cudaMemcpy(gpu_cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { //GPU to CPU if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) { this->CopyToHost(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) { const HostMatrixMCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixMCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateMCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); 
cudaMemcpyAsync(this->mat_.row_offset, // dst cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(this->mat_.col, // dst cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(this->mat_.val, // dst cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const { HostMatrixMCSR<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixMCSR<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateMCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); cudaMemcpyAsync(cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixMCSR<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateMCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() ); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); cudaMemcpy(this->mat_.row_offset, // dst gpu_cast_mat->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.col, // dst gpu_cast_mat->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.val, // dst gpu_cast_mat->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHostAsync(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixMCSR<ValueType> *gpu_cast_mat; HostMatrix<ValueType> 
*host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixMCSR<ValueType>*> (dst)) != NULL) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateMCSR(dst->get_nnz(), dst->get_nrow(), dst->get_ncol() ); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); cudaMemcpy(gpu_cast_mat->mat_.row_offset, // dst this->mat_.row_offset, // src (this->get_nrow()+1)*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.col, // dst this->mat_.col, // src this->get_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.val, // dst this->mat_.val, // src this->get_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } else { //GPU to CPU if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) { this->CopyToHostAsync(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> bool GPUAcceleratorMatrixMCSR<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) { this->Clear(); // empty matrix is empty matrix if (mat.get_nnz() == 0) return true; const GPUAcceleratorMatrixMCSR<ValueType> *cast_mat_mcsr; if ((cast_mat_mcsr = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&mat)) != NULL) { this->CopyFrom(*cast_mat_mcsr); return true; } /* const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr; if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) { this->Clear(); FATAL_ERROR(__FILE__, __LINE__); this->nrow_ = cast_mat_csr->get_nrow(); this->ncol_ = cast_mat_csr->get_ncol(); this->nnz_ = cast_mat_csr->get_nnz(); return true; } */ return false; } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in); GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out); assert(cast_in != NULL); assert(cast_out!= NULL); int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_mcsr_spmv_scalar<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixMCSR<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in); GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out); assert(cast_in != NULL); assert(cast_out!= NULL); int nrow = this->get_nrow(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_mcsr_add_spmv_scalar<ValueType, int> <<<GridSize, BlockSize>>> (nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val, scalar, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } template class GPUAcceleratorMatrixMCSR<double>; template class GPUAcceleratorMatrixMCSR<float>; }
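The SpMV kernels invoked by Apply and ApplyAdd (kernel_mcsr_spmv_scalar, kernel_mcsr_add_spmv_scalar) are defined in cuda_kernels_mcsr.hpp and are not shown in this file. For orientation, a scalar one-thread-per-row MCSR SpMV typically looks like the sketch below, assuming the usual modified-CSR layout in which val[i] holds the diagonal of row i and row_offset/col/val address the off-diagonal entries; this is an illustration of the format, not the PARALUTION kernel itself:

// Illustrative scalar SpMV for a modified-CSR (MCSR) matrix: one thread per row.
__global__ void mcsr_spmv_sketch(int nrow, const int* row_offset, const int* col,
                                 const double* val, const double* x, double* y)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < nrow)
    {
        double sum = val[row] * x[row];              // diagonal, stored separately
        for (int j = row_offset[row]; j < row_offset[row + 1]; ++j)
            sum += val[j] * x[col[j]];               // off-diagonal entries
        y[row] = sum;
    }
}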
0f9842dc6fa36db823999dee72bd08e0ce0dab4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * * @file cuThomasBatch.cu * * @brief cuThomasBatch kernel implementation. * * cuThomasBatch is a software package provided by * Barcelona Supercomputing Center - Centro Nacional de Supercomputacion * * @author Ivan Martinez-Perez [email protected] * @author Pedro Valero-Lara [email protected] * **/ /** * * @ingroup cuThomasBatch * * Solve a set of Tridiagonal linear systems: * * A_ix_i = RHS_i, for all i = 0, ..., N * * N = BATCHCOUNT * * where A is an MxM tridiagonal matrix: * * A_i = [ D_i[0] U_i[1] . . . . * L_i[0] D_i[1] U_i[2] . . . * . L_i[1] D_i[2] . . . * . . L_i[2] . . U_i[M-1] * . . . . L_i[M-2] D_i[M-1] ] * * Note that the elements of the inputs must be interleaved by following the * next pattern for N (BATCHCOUNT) tridiagonal systems and M elements each: * * D_0[0], D_1[0], ..., D_N[0], ..., D_0[M-1], D_1[M-1], ..., D_N[M-1] * **/ /** * * @param[in] * L double *. * L is a pointer to the lower-diagonal vector * * @param[in] * D double *. * D is a pointer to the diagonal vector * * @param[in,out] * U double *. * U is a pointer to the upper-diagonal vector * * @param[in,out] * RHS double *. * RHS is a pointer to the Right Hand Side vector * * * @param[in] * M int. * M specifies the number of elements of the systems * * @param[in] * BATCHCOUNT int. * BATCHCOUNT specifies the number of systems to be processed **/ #include "cuThomasBatch.h" __global__ void cuThomasBatch( const double *L, const double *D, double *U, double *RHS, const int M, const int BATCHCOUNT ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if(tid < BATCHCOUNT) { int first = tid; int last = BATCHCOUNT*(M-1)+tid; U[first] /= D[first]; RHS[first] /= D[first]; for (int i = first + BATCHCOUNT; i < last; i+=BATCHCOUNT) { U[i] /= D[i] - L[i] * U[i-BATCHCOUNT]; RHS[i] = ( RHS[i] - L[i] * RHS[i-BATCHCOUNT] ) / ( D[i] - L[i] * U[i-BATCHCOUNT] ); } RHS[last] = ( RHS[last] - L[last] * RHS[last-BATCHCOUNT] ) / ( D[last] - L[last] * U[last-BATCHCOUNT] ); for (int i = last-BATCHCOUNT; i >= first; i-=BATCHCOUNT) { RHS[i] -= U[i] * RHS[i+BATCHCOUNT]; } } }
0f9842dc6fa36db823999dee72bd08e0ce0dab4a.cu
/** * * @file cuThomasBatch.cu * * @brief cuThomasBatch kernel implementaion. * * cuThomasBatch is a software package provided by * Barcelona Supercomputing Center - Centro Nacional de Supercomputacion * * @author Ivan Martinez-Perez [email protected] * @author Pedro Valero-Lara [email protected] * **/ /** * * @ingroup cuThomasBatch * * Solve a set of Tridiagonal linear systems: * * A_ix_i = RHS_i, for all i = 0, ..., N * * N = BATCHCOUNT * * where A is a MxM tridiagonal matrix: * * A_i = [ D_i[0] U_i[1] . . . . * L_i[0] D_i[1] U_i[2] . . . * . L_i[1] D_i[2] . . . * . . L_i[2] . . U_i[M-1] * . . . . L_i[M-2] D_i[M-1] ] * * Note that the elements of the inputs must be interleaved by following the * next pattern for N (BATCHCOUNT) tridiagonal systems and M elements each: * * D_0[0], D_1[0], ..., D_N[0], ..., D_0[M-1], D_1[M-1], ..., D_N[M-1] * **/ /** * * @param[in] * L double *. * L is a pointer to the lower-diagonal vector * * @param[in] * D double *. * D is a pointer to the diagonal vector * * @param[in,out] * U double *. * U is a pointer to the uper-diagonal vector * * @param[in,out] * RHS double *. * RHS is a pointer to the Right Hand Side vector * * * @param[in] * M int. * M specifies the number of elemets of the systems * * @param[in] * BATCHCOUNT int. * BATCHCOUNT specifies to number of systems to be procesed **/ #include "cuThomasBatch.h" __global__ void cuThomasBatch( const double *L, const double *D, double *U, double *RHS, const int M, const int BATCHCOUNT ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if(tid < BATCHCOUNT) { int first = tid; int last = BATCHCOUNT*(M-1)+tid; U[first] /= D[first]; RHS[first] /= D[first]; for (int i = first + BATCHCOUNT; i < last; i+=BATCHCOUNT) { U[i] /= D[i] - L[i] * U[i-BATCHCOUNT]; RHS[i] = ( RHS[i] - L[i] * RHS[i-BATCHCOUNT] ) / ( D[i] - L[i] * U[i-BATCHCOUNT] ); } RHS[last] = ( RHS[last] - L[last] * RHS[last-BATCHCOUNT] ) / ( D[last] - L[last] * U[last-BATCHCOUNT] ); for (int i = last-BATCHCOUNT; i >= first; i-=BATCHCOUNT) { RHS[i] -= U[i] * RHS[i+BATCHCOUNT]; } } }
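The header comment above fully specifies the interleaved input layout, but a small host driver makes it concrete. The sketch below is not part of the cuThomasBatch package: it assumes cuThomasBatch.h declares the kernel shown above, builds N diagonally dominant systems of size M, stores element i of system s at index i*N + s, and launches one thread per system.

// Sketch of a host driver (assumed names, not part of the package).
#include <cuda_runtime.h>
#include <vector>
#include "cuThomasBatch.h"   // assumed to declare the cuThomasBatch kernel

int main() {
    const int M = 64, N = 256;                   // system size and BATCHCOUNT
    std::vector<double> L(M * N), D(M * N), U(M * N), RHS(M * N);
    for (int i = 0; i < M; ++i)                  // element index within a system
        for (int s = 0; s < N; ++s) {            // system index
            int k = i * N + s;                   // interleaved index: element i of system s
            L[k]   = (i == 0)     ? 0.0 : -1.0;  // L[0] is unused by the sweep
            U[k]   = (i == M - 1) ? 0.0 : -1.0;  // U[M-1] is unused by the sweep
            D[k]   = 4.0;                        // diagonally dominant, so no pivoting needed
            RHS[k] = 1.0;
        }
    double *dL, *dD, *dU, *dRHS;
    size_t bytes = sizeof(double) * M * N;
    cudaMalloc(&dL, bytes);  cudaMalloc(&dD, bytes);
    cudaMalloc(&dU, bytes);  cudaMalloc(&dRHS, bytes);
    cudaMemcpy(dL, L.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dD, D.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dU, U.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dRHS, RHS.data(), bytes, cudaMemcpyHostToDevice);
    int threads = 128, blocks = (N + threads - 1) / threads;   // one thread per system
    cuThomasBatch<<<blocks, threads>>>(dL, dD, dU, dRHS, M, N);
    cudaMemcpy(RHS.data(), dRHS, bytes, cudaMemcpyDeviceToHost); // solutions, still interleaved
    cudaFree(dL); cudaFree(dD); cudaFree(dU); cudaFree(dRHS);
    return 0;
}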
91d20aeb5f0773a9a7c82e35777ca66578dd9e75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "FixWallLJ126.h" #include "BoundsGPU.h" #include "GridGPU.h" #include "State.h" #include "boost_for_export.h" #include "cutils_math.h" #include "WallEvaluate.h" const std::string wallLJ126Type = "WallLJ126"; namespace py = boost::python; // the constructor for FixWallLJ126 FixWallLJ126::FixWallLJ126(SHARED(State) state_, std::string handle_, std::string groupHandle_, Vector origin_, Vector forceDir_, real dist_, real sigma_, real epsilon_) : FixWall(state_, handle_, groupHandle_, wallLJ126Type, true, false, 1, origin_, forceDir_.normalized()), dist(dist_), sigma(sigma_), epsilon(epsilon_) { assert(dist >= 0); }; // this refers to the template in the /Evaluators/ folder - void FixWallLJ126::compute(int virialMode) { GPUData &gpd = state->gpd; int activeIdx = gpd.activeIdx(); int n = state->atoms.size(); if (virialMode) { hipLaunchKernelGGL(( compute_wall_iso<EvaluatorWallLJ126, true>) , dim3(NBLOCK(n)), dim3(PERBLOCK), 0, 0, n, gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.virials.d_data.data(), origin.asreal3(), forceDir.asreal3(), groupTag, evaluator); } else { hipLaunchKernelGGL(( compute_wall_iso<EvaluatorWallLJ126, false>) , dim3(NBLOCK(n)), dim3(PERBLOCK), 0, 0, n, gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.virials.d_data.data(), origin.asreal3(), forceDir.asreal3(), groupTag, evaluator); } }; void FixWallLJ126::singlePointEng(real *perParticleEng) { GPUData &gpd = state->gpd; int activeIdx = gpd.activeIdx(); int n = state->atoms.size(); hipLaunchKernelGGL(( compute_wall_energy<EvaluatorWallLJ126>) , dim3(NBLOCK(n)), dim3(PERBLOCK), 0, 0, n, gpd.xs(activeIdx), perParticleEng, gpd.fs(activeIdx), // for groupTag origin.asreal3(), forceDir.asreal3(), groupTag, evaluator); return; }; bool FixWallLJ126::prepareForRun() { // instantiate this fix's evaulator with the appropriate parameters evaluator = EvaluatorWallLJ126(sigma, epsilon, dist); prepared = true; return prepared; }; // export function void export_FixWallLJ126() { py::class_<FixWallLJ126, SHARED(FixWallLJ126), py::bases<FixWall>, boost::noncopyable > ( "FixWallLJ126", py::init<SHARED(State), std::string, std::string, Vector, Vector, real, real, real> ( py::args("state", "handle", "groupHandle", "origin", "forceDir", "dist", "sigma", "epsilon") ) ) .def_readwrite("sigma", &FixWallLJ126::sigma) .def_readwrite("epsilon", &FixWallLJ126::epsilon) .def_readwrite("dist", &FixWallLJ126::dist) .def_readwrite("forceDir", &FixWallLJ126::forceDir) .def_readwrite("origin", &FixWallLJ126::origin) ; }
91d20aeb5f0773a9a7c82e35777ca66578dd9e75.cu
#include "FixWallLJ126.h" #include "BoundsGPU.h" #include "GridGPU.h" #include "State.h" #include "boost_for_export.h" #include "cutils_math.h" #include "WallEvaluate.h" const std::string wallLJ126Type = "WallLJ126"; namespace py = boost::python; // the constructor for FixWallLJ126 FixWallLJ126::FixWallLJ126(SHARED(State) state_, std::string handle_, std::string groupHandle_, Vector origin_, Vector forceDir_, real dist_, real sigma_, real epsilon_) : FixWall(state_, handle_, groupHandle_, wallLJ126Type, true, false, 1, origin_, forceDir_.normalized()), dist(dist_), sigma(sigma_), epsilon(epsilon_) { assert(dist >= 0); }; // this refers to the template in the /Evaluators/ folder - void FixWallLJ126::compute(int virialMode) { GPUData &gpd = state->gpd; int activeIdx = gpd.activeIdx(); int n = state->atoms.size(); if (virialMode) { compute_wall_iso<EvaluatorWallLJ126, true> <<<NBLOCK(n), PERBLOCK>>>(n, gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.virials.d_data.data(), origin.asreal3(), forceDir.asreal3(), groupTag, evaluator); } else { compute_wall_iso<EvaluatorWallLJ126, false> <<<NBLOCK(n), PERBLOCK>>>(n, gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.virials.d_data.data(), origin.asreal3(), forceDir.asreal3(), groupTag, evaluator); } }; void FixWallLJ126::singlePointEng(real *perParticleEng) { GPUData &gpd = state->gpd; int activeIdx = gpd.activeIdx(); int n = state->atoms.size(); compute_wall_energy<EvaluatorWallLJ126> <<<NBLOCK(n), PERBLOCK>>>(n, gpd.xs(activeIdx), perParticleEng, gpd.fs(activeIdx), // for groupTag origin.asreal3(), forceDir.asreal3(), groupTag, evaluator); return; }; bool FixWallLJ126::prepareForRun() { // instantiate this fix's evaulator with the appropriate parameters evaluator = EvaluatorWallLJ126(sigma, epsilon, dist); prepared = true; return prepared; }; // export function void export_FixWallLJ126() { py::class_<FixWallLJ126, SHARED(FixWallLJ126), py::bases<FixWall>, boost::noncopyable > ( "FixWallLJ126", py::init<SHARED(State), std::string, std::string, Vector, Vector, real, real, real> ( py::args("state", "handle", "groupHandle", "origin", "forceDir", "dist", "sigma", "epsilon") ) ) .def_readwrite("sigma", &FixWallLJ126::sigma) .def_readwrite("epsilon", &FixWallLJ126::epsilon) .def_readwrite("dist", &FixWallLJ126::dist) .def_readwrite("forceDir", &FixWallLJ126::forceDir) .def_readwrite("origin", &FixWallLJ126::origin) ; }
50273d1b0014a681da9b53635edcbd03d26d7fce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cassert> #define checkCudaErrors(Code) assert((Code) == hipSuccess) #define checkCudaLaunch(...) checkCudaErrors((__VA_ARGS__, hipPeekAtLastError())) static constexpr int NumThreads = 32; static constexpr int NumBlocks = 2; __global__ void vectorAdd(int *v) { int tx = threadIdx.x + blockDim.x * blockIdx.x; v[tx] += tx; } int main() { int *d_vec = nullptr; checkCudaErrors(hipMalloc((void**)&d_vec, sizeof(int) * NumBlocks * NumThreads)); // Size is missing `* sizeof(int)` checkCudaErrors(hipMemset(d_vec, 0, NumBlocks * NumThreads)); hipLaunchKernelGGL(( checkCudaLaunch(vectorAdd), dim3(NumBlocks), dim3(NumThreads), 0, 0, d_vec)); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(d_vec)); return 0; }
50273d1b0014a681da9b53635edcbd03d26d7fce.cu
/* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cassert> #define checkCudaErrors(Code) assert((Code) == cudaSuccess) #define checkCudaLaunch(...) checkCudaErrors((__VA_ARGS__, cudaPeekAtLastError())) static constexpr int NumThreads = 32; static constexpr int NumBlocks = 2; __global__ void vectorAdd(int *v) { int tx = threadIdx.x + blockDim.x * blockIdx.x; v[tx] += tx; } int main() { int *d_vec = nullptr; checkCudaErrors(cudaMalloc((void**)&d_vec, sizeof(int) * NumBlocks * NumThreads)); // Size is missing `* sizeof(int)` checkCudaErrors(cudaMemset(d_vec, 0, NumBlocks * NumThreads)); checkCudaLaunch(vectorAdd<<<NumBlocks, NumThreads>>>(d_vec)); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(d_vec)); return 0; }
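The sample above keeps a byte/element mix-up that its own comment flags: the cudaMemset size omits * sizeof(int), so only a quarter of the buffer is zeroed and vectorAdd reads uninitialized device memory. cudaDeviceSynchronize alone will not surface this; a tool such as compute-sanitizer's initcheck is designed to catch it. The corrected call, reusing the sample's own names, is simply:

// Corrected size: count elements, then scale by the element size in bytes.
checkCudaErrors(cudaMemset(d_vec, 0, sizeof(int) * NumBlocks * NumThreads));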
589c7b1607aebf9b2744302848abebeec005d183.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mex.h" /* Kernel to square elements of the array on the GPU */ __global__ void square_elements(float* in, float* out, int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; //if ( idx < N) out[idx]=in[idx]*in[idx]; if ( idx < N)out[0]=out[0]+in[idx]*in[idx]; __syncthreads(); } /* Gateway function */ void mexFunction(int nlhs, mxArray *plhs[],int nrhs, const mxArray *prhs[]) { int i, j, m, n; double *data1, *data2; float *data1f, *data2f; float *data1f_gpu, *data2f_gpu; mxClassID category; if (nrhs != nlhs) mexErrMsgTxt("The number of input and output arguments must be the same."); for (i = 0; i < nrhs; i++) { /* Find the dimensions of the data */ m = mxGetM(prhs[i]); n = mxGetN(prhs[i]); /* Create an mxArray for the output data */ //plhs[i] = mxCreateDoubleMatrix(m, n, mxREAL); plhs[i] = mxCreateDoubleMatrix(1, 1, mxREAL); /* Create an input and output data array on the GPU*/ hipMalloc( (void **) &data1f_gpu,sizeof(float)*m*n); //hipMalloc( (void **) &data2f_gpu,sizeof(float)*m*n); hipMalloc( (void **) &data2f_gpu,sizeof(float)); /* Retrieve the input data */ data1 = mxGetPr(prhs[i]); /* Check if the input array is single or double precision */ category = mxGetClassID(prhs[i]); if( category == mxSINGLE_CLASS) { /* The input array is single precision, it can be sent directly to the card */ hipMemcpy( data1f_gpu, data1, sizeof(float)*m*n, hipMemcpyHostToDevice); } if( category == mxDOUBLE_CLASS) { /* The input array is in double precision, it needs to be converted t floats before being sent to the card */ data1f = (float *) mxMalloc(sizeof(float)*m*n); for (j = 0; j < m*n; j++) { data1f[j] = (float) data1[j]; } printf("before copyHost to device \n"); hipMemcpy( data1f_gpu, data1f, sizeof(float)*n*m, hipMemcpyHostToDevice); }//if( category == mxDOUBLE_CLASS) //orginal output //data2f = (float *) mxMalloc(sizeof(float)*m*n); data2f = (float *) mxMalloc(sizeof(float)); /* Compute execution configuration using 128 threads per block */ dim3 dimBlock(128); dim3 dimGrid((m*n)/dimBlock.x); if ( (n*m) % 128 !=0 ) dimGrid.x+=1; printf("before calling GPU \n"); /* Call function on GPU */ hipLaunchKernelGGL(( square_elements), dim3(dimGrid),dim3(dimBlock), 0, 0, data1f_gpu, data2f_gpu, n*m); printf("before copy result back \n"); /* Copy result back to host */ //hipMemcpy( data2f, data2f_gpu, sizeof(float)*n*m, hipMemcpyDeviceToHost); hipMemcpy( data2f, data2f_gpu, sizeof(float), hipMemcpyDeviceToHost); /* Create a pointer to the output data */ data2 = mxGetPr(plhs[i]); /* Convert from single to double before returning */ /* for (j = 0; j < m*n; j++) { data2[j] = (double) data2f[j]; } */ printf("before return result to matlab \n"); data2[0] = 0; data2[0] = (double) data2f[0]; /* Clean-up memory on device and host */ mxFree(data1f); mxFree(data2f); hipFree(data1f_gpu); hipFree(data2f_gpu); }// for i }
589c7b1607aebf9b2744302848abebeec005d183.cu
#include "cuda.h" #include "mex.h" /* Kernel to square elements of the array on the GPU */ __global__ void square_elements(float* in, float* out, int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; //if ( idx < N) out[idx]=in[idx]*in[idx]; if ( idx < N)out[0]=out[0]+in[idx]*in[idx]; __syncthreads(); } /* Gateway function */ void mexFunction(int nlhs, mxArray *plhs[],int nrhs, const mxArray *prhs[]) { int i, j, m, n; double *data1, *data2; float *data1f, *data2f; float *data1f_gpu, *data2f_gpu; mxClassID category; if (nrhs != nlhs) mexErrMsgTxt("The number of input and output arguments must be the same."); for (i = 0; i < nrhs; i++) { /* Find the dimensions of the data */ m = mxGetM(prhs[i]); n = mxGetN(prhs[i]); /* Create an mxArray for the output data */ //plhs[i] = mxCreateDoubleMatrix(m, n, mxREAL); plhs[i] = mxCreateDoubleMatrix(1, 1, mxREAL); /* Create an input and output data array on the GPU*/ cudaMalloc( (void **) &data1f_gpu,sizeof(float)*m*n); //cudaMalloc( (void **) &data2f_gpu,sizeof(float)*m*n); cudaMalloc( (void **) &data2f_gpu,sizeof(float)); /* Retrieve the input data */ data1 = mxGetPr(prhs[i]); /* Check if the input array is single or double precision */ category = mxGetClassID(prhs[i]); if( category == mxSINGLE_CLASS) { /* The input array is single precision, it can be sent directly to the card */ cudaMemcpy( data1f_gpu, data1, sizeof(float)*m*n, cudaMemcpyHostToDevice); } if( category == mxDOUBLE_CLASS) { /* The input array is in double precision, it needs to be converted t floats before being sent to the card */ data1f = (float *) mxMalloc(sizeof(float)*m*n); for (j = 0; j < m*n; j++) { data1f[j] = (float) data1[j]; } printf("before copyHost to device \n"); cudaMemcpy( data1f_gpu, data1f, sizeof(float)*n*m, cudaMemcpyHostToDevice); }//if( category == mxDOUBLE_CLASS) //orginal output //data2f = (float *) mxMalloc(sizeof(float)*m*n); data2f = (float *) mxMalloc(sizeof(float)); /* Compute execution configuration using 128 threads per block */ dim3 dimBlock(128); dim3 dimGrid((m*n)/dimBlock.x); if ( (n*m) % 128 !=0 ) dimGrid.x+=1; printf("before calling GPU \n"); /* Call function on GPU */ square_elements<<<dimGrid,dimBlock>>>(data1f_gpu, data2f_gpu, n*m); printf("before copy result back \n"); /* Copy result back to host */ //cudaMemcpy( data2f, data2f_gpu, sizeof(float)*n*m, cudaMemcpyDeviceToHost); cudaMemcpy( data2f, data2f_gpu, sizeof(float), cudaMemcpyDeviceToHost); /* Create a pointer to the output data */ data2 = mxGetPr(plhs[i]); /* Convert from single to double before returning */ /* for (j = 0; j < m*n; j++) { data2[j] = (double) data2f[j]; } */ printf("before return result to matlab \n"); data2[0] = 0; data2[0] = (double) data2f[0]; /* Clean-up memory on device and host */ mxFree(data1f); mxFree(data2f); cudaFree(data1f_gpu); cudaFree(data2f_gpu); }// for i }
522214581e96d983ee77235a63e0e631df281f75.hip
// !!! This is a file automatically generated by hipify!!!
// To-do list:
// 1- The center of the tissue is manually introduced in the obtainTwoNewCenter function to recognize whether the mother cell is in front or behind.
// 2- If two divisions occur at exactly the same time, there is a chance of error in detecting mother and daughter cells.
// 3- In the function processMemVec, lateralBefore and lateralAfter are not carefully assigned. If the code is reused for cases where division happens, this should be revisited.
// 4- If the code is reused for the case where node deletion is active, then the function for calculating cell pressure (void SceCells::calCellPressure()) needs to be revisited.
// 5- Two bool variables, subcellularPolar and cellularPolar, are assigned values inside the code. Although they are always true for now, they should be input parameters.
// 6- The value of L0 in the function calAndAddMM_ContractAdh is hard-coded inside the function. It should be an input of the code.
// 7- In the function calAndAddMM_ContractRepl, the Morse potential values are equal to the values of sceIIDiv_M[i] in the input file. They should be inputs of the code.
// 8- In the function calBendMulti_Mitotic, the equilibrium angle for the bending stiffness is pi. It should be an input of the code.
// Notes:
// 1- Currently the nucleus position is a desired location, not an enforced position. So all the functions that used "nucleusLocX" & "nucleusLocY" are not active. Instead, the two variables "nucleusDesireLocX" & "nucleusDesireLocY" are active, and the internal average position represents where the nuclei are located.
// 2- NucleusLocPercent info is currently updated in "copyFirstCellArr_M" using the mother cell rank and daughter cell rank. For now, the daughter cell
//    will inherit the same percentage as the mother cell. - Kevin
// 3- We currently force all cell areas to be positive after its execution. However, this is only an ad hoc method, since modifying CalTriArea anyway will lead
//    to a seg fault. Will need to figure out why eventually. - Kevin
// 4- To accommodate non-convex cell shapes during division, "isMotherCellBehind" is set to true all the time. Since we are not tracking lineage in any way at the moment,
//    it is acceptable. - Kevin
// 5- The "calApicalBasalRingIds" function is not updated for the newest division algorithm (for non-convex cells) using the basal and apical loc of each cell. Since we are not computing contractile ring forces,
//    it is unnecessary. However, if we do indeed need to compute the force, it has to be updated accordingly. - Kevin
// 6- The 'contractileSpringGrowthProgressSpeed' and similar constants must be recalculated if the min and max distance from the cell center is adjusted for mitotic rounding.
- Kevin #include "SceCells.h" #include <cmath> #include <numeric> //# define debugModeECM double epsilon = 1.0e-12; __constant__ double membrEquLen; __constant__ double membrStiff; __constant__ double membrStiff_Mitotic; //Ali June 30 __constant__ double kContractMemb ; __constant__ double pI; __constant__ double minLength; __constant__ double minDivisor; __constant__ uint maxAllNodePerCell; __constant__ uint maxMembrPerCell; __constant__ uint maxIntnlPerCell; __constant__ double bendCoeff; __constant__ double bendCoeff_Mitotic;//AAMIRI __constant__ double sceIB_M[5]; __constant__ double sceIBDiv_M[5]; __constant__ double sceII_M[5]; __constant__ double sceN_M[5]; //Ali __constant__ double sceIIDiv_M[5]; __constant__ double sceNDiv_M[5]; //Ali __constant__ double grthPrgrCriEnd_M; __constant__ double F_Ext_Incline_M2 ; //Ali namespace patch{ template <typename T> std::string to_string (const T& n) { std:: ostringstream stm ; stm << n ; return stm.str() ; } } //Ali & Abu June 30th __device__ double calMembrForce_Mitotic(double& length, double& progress, double mitoticCri, double adhereIndex) { /* if (adhereIndex==-1) { if (progress <= mitoticCri) { return (length - membrEquLen) * membrStiff; } else { return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri)); } } */ // else { if (progress <= mitoticCri) { return (length - membrEquLen) * membrStiff; } else { return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri)); } // } } // //Ali __device__ double calMembrForce_Actin(double& length, double kAvg) { return ((length - membrEquLen) * kAvg); } __device__ double calMembrForce_ActinMitotic(double& length, double kAvg) { return ((length - 0.5*membrEquLen) * kAvg); } __device__ double CalMembrLinSpringEnergy(double& length, double kAvg) { return (0.5*kAvg *(length - membrEquLen)*(length - membrEquLen)) ; } __device__ double DefaultMembraneStiff() { int kStiff=membrStiff ; return kStiff; } __device__ double CalExtForce(double curTime) { return min(curTime * F_Ext_Incline_M2,10.0); } //Ali __device__ double obtainRandAngle(uint& cellRank, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(cellRank); thrust::uniform_real_distribution<double> u0Pi(0, 2.0 * pI); double randomAngle = u0Pi(rng); return randomAngle; } __device__ uint obtainNewIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount); } //AAMIRI __device__ uint obtainLastIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount ); } //AAMIRI __device__ uint obtainMembEndNode(uint& cellRank, uint& activeMembrNodeThis) { return (cellRank * maxAllNodePerCell + activeMembrNodeThis - 1 ); } __device__ bool isAllIntnlFilled(uint& currentIntnlCount) { if (currentIntnlCount < maxIntnlPerCell) { return false; } else { return true; } } //AAMIRI __device__ int obtainRemovingMembrNodeID(uint &cellRank, uint& activeMembrNodes, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(activeMembrNodes); thrust::uniform_int_distribution<double> dist(0, activeMembrNodes-1); int randomNode = dist(rng); return (cellRank * maxAllNodePerCell + randomNode); } //AAMIRI __device__ bool isAllIntnlEmptied(uint& currentIntnlCount) { if (currentIntnlCount > 0) { return false; } else { return true; } } 
//AAMIRI __device__ bool isAllMembrEmptied(uint& currentMembrCount) { if (currentMembrCount > 0) { return false; } else { return true; } } __device__ bool longEnough(double& length) { if (length > minLength) { return true; } else { return false; } } __device__ double compDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) { return sqrt( (xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)); } void SceCells::distributeBdryIsActiveInfo() { thrust::fill(nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, true); } //void SceCells::UpdateTimeStepByAdaptiveMethod( double adaptiveLevelCoef,double minDt,double maxDt, double & dt) { //double energyPrime=( energyCell.totalNodeEnergyCell +eCM.energyECM.totalEnergyECM - energyCell.totalNodeEnergyCellOld - eCM.energyECM.totalEnergyECMOld)/dt ; //eCM.energyECM.totalEnergyPrimeECM=( eCM.energyECM.totalEnergyECM - eCM.energyECM.totalEnergyECMOld)/dt ; //dt=dt ; // max (minDt, maxDt/sqrt( 1 +adaptiveLevelCoef*pow(eCM.energyECM.totalEnergyPrimeECM,2))) ; //dt=max (minDt, maxDt/sqrt( 1 +pow(adaptiveLevelCoef*eCM.energyECM.totalEnergyPrimeECM,2))) ; //} void SceCells::distributeProfileIsActiveInfo() { thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile + nodes->getAllocPara().currentActiveProfileNodeCount, true); } void SceCells::distributeECMIsActiveInfo() { uint totalNodeCountForActiveECM = allocPara.currentActiveECM * allocPara.maxNodePerECM; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveECM); thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosECM, nodes->getInfoVecs().nodeIsActive.begin() + totalNodeCountForActiveECM + allocPara.startPosECM, true); } void SceCells::distributeCellIsActiveInfo() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::transform( thrust::make_transform_iterator(countingBegin, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_transform_iterator(countingEnd, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_permutation_iterator( cellInfoVecs.activeNodeCountOfThisCell.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::less<uint>()); } void SceCells::distributeCellGrowthProgress() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara.startPosCells); } void MembrPara::initFromConfig() { membrEquLenCPU = globalConfigVars.getConfigValue("MembrEquLen").toDouble(); membrStiffCPU = globalConfigVars.getConfigValue("MembrStiff").toDouble(); membrStiff_Mitotic = 
globalConfigVars.getConfigValue("MembrStiff_Mitotic").toDouble(); //Ali June30 kContractMemb = globalConfigVars.getConfigValue("KContractMemb").toDouble(); //Ali membrGrowCoeff_Ori = globalConfigVars.getConfigValue("MembrGrowCoeff").toDouble(); membrGrowLimit_Ori = globalConfigVars.getConfigValue("MembrGrowLimit").toDouble(); membrGrowCoeff = membrGrowCoeff_Ori; membrGrowLimit = membrGrowLimit_Ori; //Ali F_Ext_Incline = globalConfigVars.getConfigValue("FExtIncline").toDouble(); //Ali membrBendCoeff = globalConfigVars.getConfigValue("MembrBenCoeff").toDouble(); //AAMIRI membrBendCoeff_Mitotic = globalConfigVars.getConfigValue("MembrBenCoeff_Mitotic").toDouble(); adjustLimit = globalConfigVars.getConfigValue("MembrAdjustLimit").toDouble(); adjustCoeff = globalConfigVars.getConfigValue("MembrAdjustCoeff").toDouble(); growthConst_N = globalConfigVars.getConfigValue("MembrGrowthConst").toDouble(); initMembrCt_N = globalConfigVars.getConfigValue("InitMembrNodeCount").toInt(); initIntnlCt_N = globalConfigVars.getConfigValue("InitCellNodeCount").toInt(); } SceCells::SceCells() { //curTime = 0 + 55800.0;//AAMIRI // Ali I comment that out safely on 04/04/2017 } void SceCells::growAtRandom(double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; // randomly select growth direction and speed. randomizeGrowth(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); //std::cout << "after apply stretch force" << std::endl; cellChemotaxis(); //std::cout << "after apply cell chemotaxis" << std::endl; addPointIfScheduledToGrow(); //std::cout << "after adding node" << std::endl; } /** * Use the growth magnitude and dt to update growthProgress. */ void SceCells::updateGrowthProgress() { thrust::transform(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); } /** * Decide if the cells are going to add a node or not. * Use lastCheckPoint and growthProgress to decide whether add point or not */ void SceCells::decideIsScheduleToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } /** * Calculate target length of cell given the cell growth progress. * length is along the growth direction. */ void SceCells::computeCellTargetLength() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } /** * Compute distance of each node to its corresponding cell center. 
* The distantce could be either positive or negative, depending on the pre-defined * growth direction. */ void SceCells::computeDistToCellCenter() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } /** * For nodes of each cell, find the maximum and minimum distance to the center. * We will then calculate the current length of a cell along its growth direction * using max and min distance to the center. */ void SceCells::findMinAndMaxDistToCenter() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } /** * Compute the difference for cells between their expected length and current length. 
*/ void SceCells::computeLenDiffExpCur() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } /** * Use the difference that just computed and growthXDir&growthYDir * to apply stretching force (velocity) on nodes of all cells */ void SceCells::stretchCellGivenLenDiff() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyStretchForce(bioPara.elongationCoefficient)); } /** * This is just an attempt. Cells move according to chemicals. 
*/ void SceCells::cellChemotaxis() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyChemoVel(bioPara.chemoCoefficient)); } /** * Adjust the velocities of nodes. * For example, velocity of boundary nodes must be zero. */ void SceCells::adjustNodeVel() { thrust::counting_iterator<uint> countingIterBegin(0); thrust::counting_iterator<uint> countingIterEnd( totalNodeCountForActiveCells + allocPara.startPosCells); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), VelocityModifier(allocPara.startPosProfile, allocPara.currentActiveProfileNodeCount)); } /** * Move nodes according to the velocity we just adjusted. */ void SceCells::moveNodes() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2(dt)); } /** * Add a point to a cell if it is scheduled to grow. 
* This step does not guarantee success ; If adding new point failed, it will not change * isScheduleToGrow and activeNodeCount; */ void SceCells::addPointIfScheduledToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.lastCheckPoint.begin())), AddPtOp(allocPara.maxNodeOfOneCell, miscPara.addNodeDistance, miscPara.minDistanceToOtherNode, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, time(NULL), miscPara.growThreshold)); } //Ali commented this constructor in 04/04/2017 // this constructor is not active SceCells::SceCells(SceNodes* nodesInput, std::vector<uint>& numOfInitActiveNodesOfCells, std::vector<SceNodeType>& cellTypes) : countingBegin(0), initIntnlNodeCount( nodesInput->getAllocPara().maxNodeOfOneCell / 2), initGrowthProgress( 0.0) { curTime = 0.0 + 55800.0;//AAMIRI std ::cout << "I am in SceCells constructor with polymorphism shape "<<InitTimeStage<<std::endl ; initialize(nodesInput); copyInitActiveNodeCount(numOfInitActiveNodesOfCells); thrust::device_vector<SceNodeType> cellTypesToPass = cellTypes; setCellTypes(cellTypesToPass); distributeIsActiveInfo(); } SceCells::SceCells(SceNodes* nodesInput,SceECM* eCMInput, Solver * solver, std::vector<uint>& initActiveMembrNodeCounts, std::vector<uint>& initActiveIntnlNodeCounts, std::vector<double> &initGrowProgVec, std::vector<ECellType> &eCellTypeV1, double InitTimeStage) { curTime=InitTimeStage ; std ::cout << "I am in SceCells constructor with number of inputs "<<InitTimeStage<<std::endl ; tmpDebug = false; aniDebug = false; membrPara.initFromConfig(); shrinkRatio = globalConfigVars.getConfigValue("ShrinkRatio").toDouble(); centerShiftRatio = globalConfigVars.getConfigValue("CenterShiftRatio").toDouble(); double simulationTotalTime = globalConfigVars.getConfigValue("SimulationTotalTime").toDouble(); double simulationTimeStep = globalConfigVars.getConfigValue("SimulationTimeStep").toDouble(); int TotalNumOfOutputFrames = globalConfigVars.getConfigValue("TotalNumOfOutputFrames").toInt(); std ::cout << "I am in SceCells constructor with zero element "<<InitTimeStage<<std::endl ; isInitNucPercentCalculated=false ; isBasalActinPresent=true ; isCellGrowSet=false ; cout <<" Basal actinomyosin is active on pouch cells" << endl ; addNode=true ; cout << " addNode boolean is initialized " <<addNode <<endl ; relaxCount=0 ; freqPlotData=int ( (simulationTotalTime-InitTimeStage)/(simulationTimeStep*TotalNumOfOutputFrames) ) ; memNewSpacing = globalConfigVars.getConfigValue("MembrLenDiv").toDouble(); cout << "relax count is initialized as" << relaxCount << endl ; initialize_M(nodesInput, eCMInput, solver); copyToGPUConstMem(); copyInitActiveNodeCount_M(initActiveMembrNodeCounts, initActiveIntnlNodeCounts, initGrowProgVec, eCellTypeV1); } void SceCells::initCellInfoVecs() { cellInfoVecs.growthProgress.resize(allocPara.maxCellCount, 0.0); 
cellInfoVecs.expectedLength.resize(allocPara.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara.maxCellCount); cellInfoVecs.activeNodeCountOfThisCell.resize(allocPara.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara.maxCellCount); cellInfoVecs.cellTypes.resize(allocPara.maxCellCount, MX); cellInfoVecs.isScheduledToGrow.resize(allocPara.maxCellCount, false); cellInfoVecs.centerCoordX.resize(allocPara.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara.maxCellCount); cellInfoVecs.centerCoordZ.resize(allocPara.maxCellCount); cellInfoVecs.cellRanksTmpStorage.resize(allocPara.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara.maxCellCount, false); } void SceCells::initCellInfoVecs_M() { cellInfoVecs.daughterCellProduced.resize(allocPara_m.maxCellCount, 0); cellInfoVecs.distFromNucleus_normal.resize(allocPara_m.maxCellCount,0.0); cellInfoVecs.distFromNucleus_normal_apical.resize(allocPara_m.maxCellCount,0.0); cellInfoVecs.individualCellHeight.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.individualCellHeight_springHeight.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.distFromBasalLoc.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.distFromApicalLoc.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.contractActomyo_multip_perCell.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.contractActomyo_multip_apical_perCell.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.numApicalVec.resize(allocPara_m.maxCellCount, 0); cellInfoVecs.numBasalVec.resize(allocPara_m.maxCellCount, 0); cellInfoVecs.cellRankVec.resize(allocPara_m.maxCellCount, 0); //std::cout << "max cell count = " << allocPara_m.maxCellCount << std::endl; cellInfoVecs.Cell_Damp.resize(allocPara_m.maxCellCount, 36.0); //Ali cellInfoVecs.growthProgress.resize(allocPara_m.maxCellCount, 0.0); //A&A cellInfoVecs.contractileSpringGrowthProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaGrowthProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaGrowthProgressNonMitotic.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.growthProgressOld.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.Cell_Time.resize(allocPara_m.maxCellCount, 0.0); //Ali cellInfoVecs.expectedLength.resize(allocPara_m.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.activeMembrNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.activeIntnlNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara_m.maxCellCount); cellInfoVecs.isEnteringMitotic.resize(allocPara_m.maxCellCount, false); //A&A //cellInfoVecs.isRemoving.resize(allocPara.maxCellCount);//AAMIRI cellInfoVecs.isScheduledToGrow.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isScheduledToShrink.resize(allocPara_m.maxCellCount, false);//AAMIRI cellInfoVecs.isCellActive.resize(allocPara_m.maxCellCount, false);//AAMIRI 
cellInfoVecs.centerCoordX.resize(allocPara_m.maxCellCount); cellInfoVecs.InternalAvgX.resize(allocPara_m.maxCellCount); //cellInfoVecs.InternalAvgIniX.resize(allocPara_m.maxCellCount); cellInfoVecs.tmpShiftVecX.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara_m.maxCellCount); cellInfoVecs.InternalAvgY.resize(allocPara_m.maxCellCount); //cellInfoVecs.InternalAvgIniY.resize(allocPara_m.maxCellCount); cellInfoVecs.tmpShiftVecY.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordZ.resize(allocPara_m.maxCellCount); cellInfoVecs.apicalLocX.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.apicalLocY.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.basalLocX.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.basalLocY.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.eCMNeighborId.resize(allocPara_m.maxCellCount,-1); //Ali cellInfoVecs.nucleusLocX.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.nucleusDesireLocX.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.nucleusDesireLocY.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.nucDesireDistApical.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.cellCenterDesireDistApical.resize(allocPara_m.maxCellCount); //Kevin cellInfoVecs.cellCenterPerturbedLocLocX.resize(allocPara_m.maxCellCount); //Kevin cellInfoVecs.cellCenterPerturbedLocLocY.resize(allocPara_m.maxCellCount); //Kevin cellInfoVecs.nucleusLocY.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.nucleusLocPercent.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.apicalNodeCount.resize(allocPara_m.maxCellCount,0); //Ali cellInfoVecs.basalNodeCount.resize(allocPara_m.maxCellCount,0); //Ali cellInfoVecs.ringApicalId.resize(allocPara_m.maxCellCount,-1); //Ali cellInfoVecs.ringBasalId.resize(allocPara_m.maxCellCount,-1); //Ali cellInfoVecs.sumLagrangeFPerCellX.resize(allocPara_m.maxCellCount,0.0); //Ali cellInfoVecs.sumLagrangeFPerCellY.resize(allocPara_m.maxCellCount,0.0); //Ali cellInfoVecs.HertwigXdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.HertwigYdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.cellRanksTmpStorage.resize(allocPara_m.maxCellCount); cellInfoVecs.cellRanksTmpStorage1.resize(allocPara_m.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara_m.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara_m.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isMembrAddingNode.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isMembrRemovingNode.resize(allocPara_m.maxCellCount, false); // Ali cellInfoVecs.maxTenIndxVec.resize(allocPara_m.maxCellCount); cellInfoVecs.minTenIndxVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenRiVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxDistToRiVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenIndxTypeVec.resize(allocPara_m.maxCellCount); cellInfoVecs.minDistToRiVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenRiMidXVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxTenRiMidYVec.resize(allocPara_m.maxCellCount); cellInfoVecs.aveTension.resize(allocPara_m.maxCellCount); cellInfoVecs.membrGrowProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.membrGrowSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaVec.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellPerimVec.resize(allocPara_m.maxCellCount, 0.0);//AAMIRI cellInfoVecs.cellPressure.resize(allocPara_m.maxCellCount, 
0.0);//Ali cellInfoVecs.sumF_MI_M_N.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.sumLagrangeFN.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.eCellTypeV2.resize(allocPara_m.maxCellCount, notActive);//Ali //cellInfoVecs.eCellTypeV2Host.resize(allocPara_m.maxCellCount, notActive);//Ali cellInfoVecs.cellRoot.resize(allocPara_m.maxCellCount, -1);//Ali thrust:: sequence (cellInfoVecs.cellRoot.begin(),cellInfoVecs.cellRoot.begin()+allocPara_m.currentActiveCellCount) ; //Ali std::cout << "initial number of active cells is " <<allocPara_m.currentActiveCellCount <<std::endl; std::cout <<"last cell rank used in the cell root is " <<cellInfoVecs.cellRoot[allocPara_m.currentActiveCellCount-1] << endl ; } void SceCells::initCellNodeInfoVecs() { cellNodeInfoVecs.cellRanks.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeZPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara.maxTotalCellNodeCount); } void SceCells::initCellNodeInfoVecs_M() { std::cout << "max total node count = " << allocPara_m.maxTotalNodeCount << std::endl; cellNodeInfoVecs.cellRanks.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeZPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeLocXApical.resize(allocPara_m.maxTotalNodeCount); //Ali cellNodeInfoVecs.activeLocYApical.resize(allocPara_m.maxTotalNodeCount); //Ali cellNodeInfoVecs.activeLocXBasal.resize(allocPara_m.maxTotalNodeCount); //Ali cellNodeInfoVecs.activeLocYBasal.resize(allocPara_m.maxTotalNodeCount); //Ali } void SceCells::initGrowthAuxData() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara.startPosCells])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara.startPosCells])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara.startPosCells])); growthAuxData.randomGrowthSpeedMin = globalConfigVars.getConfigValue( "RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randGenAuxPara = globalConfigVars.getConfigValue( "RandomGenerationAuxPara").toDouble(); if (controlPara.simuType == SingleCellTest) { growthAuxData.fixedGrowthSpeed = globalConfigVars.getConfigValue( "FixedGrowthSpeed").toDouble(); } } void SceCells::initGrowthAuxData_M() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara_m.bdryNodeCount])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara_m.bdryNodeCount])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara_m.bdryNodeCount])); growthAuxData.adhIndxAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[allocPara_m.bdryNodeCount])); growthAuxData.memNodeType1Address = thrust::raw_pointer_cast( &(nodes->getInfoVecs().memNodeType1[allocPara_m.bdryNodeCount])); //Ali growthAuxData.randomGrowthSpeedMin_Ori = globalConfigVars.getConfigValue( 
"RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax_Ori = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randomGrowthSpeedMin = growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.randomGrowthSpeedMax_Ori; growthAuxData.grthPrgrCriVal_M_Ori = globalConfigVars.getConfigValue( "GrowthPrgrCriVal").toDouble(); growthAuxData.grthProgrEndCPU = globalConfigVars.getConfigValue( "GrowthPrgrValEnd").toDouble(); } void SceCells::initialize(SceNodes* nodesInput) { nodes = nodesInput; controlPara = nodes->getControlPara(); readMiscPara(); readBioPara(); allocPara = nodesInput->getAllocPara(); // max internal node count must be even number. assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); initCellInfoVecs(); initCellNodeInfoVecs(); initGrowthAuxData(); distributeIsCellRank(); } void SceCells::initialize_M(SceNodes* nodesInput, SceECM *eCMInput, Solver *solver) { std::cout << "Initializing cells ...... " << std::endl; //std::cout.flush(); nodes = nodesInput; //pointer assigned eCMPointerCells=eCMInput ; //pointer assigned solverPointer=solver ; allocPara_m = nodesInput->getAllocParaM(); // max internal node count must be even number. assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); //std::cout << "break point 1 " << std::endl; //std::cout.flush(); controlPara = nodes->getControlPara(); // It copies the controlPara from nstance of class SceNodes to the instance of class of SceCells //std::cout << "break point 2 " << std::endl; //std::cout.flush(); readMiscPara_M(); //std::cout << "break point 3 " << std::endl; //std::cout.flush(); initCellInfoVecs_M(); //std::cout << "break point 4 " << std::endl; //std::cout.flush(); readBioPara(); //std::cout << "break point 5 " << std::endl; //std::cout.flush(); //std::cout << "break point 6 " << std::endl; //std::cout.flush(); initCellNodeInfoVecs_M(); //std::cout << "break point 7 " << std::endl; //std::cout.flush(); initGrowthAuxData_M(); //std::cout << "break point 8 " << std::endl; //std::cout.flush(); } void SceCells::copyInitActiveNodeCount( std::vector<uint>& numOfInitActiveNodesOfCells) { thrust::copy(numOfInitActiveNodesOfCells.begin(), numOfInitActiveNodesOfCells.end(), cellInfoVecs.activeNodeCountOfThisCell.begin()); } void SceCells::allComponentsMove() { adjustNodeVel(); moveNodes(); } /** * Mark cell node as either activdistributeIsActiveInfo()e or inactive. * left part of the node array will be active and right part will be inactive. * the threshold is defined by array activeNodeCountOfThisCell. * e.g. activeNodeCountOfThisCell = {2,3} and maxNodeOfOneCell = 5 */ void SceCells::distributeIsActiveInfo() { //std::cout << "before distribute bdry isActive" << std::endl; distributeBdryIsActiveInfo(); //std::cout << "before distribute profile isActive" << std::endl; distributeProfileIsActiveInfo(); //std::cout << "before distribute ecm isActive" << std::endl; distributeECMIsActiveInfo(); //std::cout << "before distribute cells isActive" << std::endl; distributeCellIsActiveInfo(); } void SceCells::distributeIsCellRank() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingCellEnd( totalNodeCountForActiveCells); std::cerr << "totalNodeCount for active cells " << totalNodeCountForActiveCells << std::endl; //thrust::counting_iterator<uint> countingECMEnd(countingECMEnd); // only computes the cell ranks of cells. 
thrust::transform(countingBegin, countingCellEnd, nodes->getInfoVecs().nodeCellRank.begin() + allocPara.startPosCells, DivideFunctor(allocPara.maxNodeOfOneCell)); std::cerr << "finished cellRank transformation" << std::endl; } /** * This method computes the center of all cells. * It is more efficient than simply iterating over the cells because of parallel reduction. */ void SceCells::computeCenterPos() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); uint totalNumberOfActiveNodes = thrust::reduce( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), isTrue()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalNumberOfActiveNodes, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::equal_to<uint>(), CVec3Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), CVec3Divide()); } /** * 2D version of cell division. * The division process is done by creating two temporary vectors to hold the node information * of the cells that are going to divide. * * step 1: based on lengthDifference, expectedLength and growthProgress, * this process determines whether a certain cell is ready to divide and then assigns * a boolean value to isDividing. * * step 2. copy those cells that will divide into the temp vectors created * * step 3. For each cell in the temp vectors, we sort its nodes by their distance to the * corresponding cell center. * This step is not very efficient when the number of cells going to divide is big.
* but this is unlikely to happen because cells will divide according to external chemical signaling * and each will have different divide progress. * * step 4. copy the right part of each cell of the sorted array (temp1) to left part of each cell of * another array * * step 5. transform isActive vector of both temp1 and temp2, making only left part of each cell active. * * step 6. insert temp2 to the end of the cell array * * step 7. copy temp1 to the previous position of the cell array. * * step 8. add activeCellCount of the system. * * step 9. mark isDivide of all cells to false. */ void SceCells::divide2DSimplified() { bool isDivisionPresent = decideIfGoingToDivide(); if (!isDivisionPresent) { return; } copyCellsPreDivision(); sortNodesAccordingToDist(); copyLeftAndRightToSeperateArrays(); transformIsActiveArrayOfBothArrays(); addSecondArrayToCellArray(); copyFirstArrayToPreviousPos(); updateActiveCellCount(); markIsDivideFalse(); } bool SceCells::decideIfGoingToDivide() { // step 1 thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isDividing.begin(), cellInfoVecs.growthProgress.begin())), CompuIsDivide(miscPara.isDivideCriticalRatio, allocPara.maxNodeOfOneCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. 
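	// Illustrative sketch of the counting idiom used just below (host-side, hypothetical data):
	// giving thrust::reduce a uint initial value makes the boolean flags sum as 0/1, so the
	// result is the number of true entries.
	//   thrust::host_vector<bool> flags(4, false);
	//   flags[1] = true; flags[3] = true;
	//   uint nTrue = thrust::reduce(flags.begin(), flags.end(), (uint) 0); // nTrue == 2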
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } void SceCells::copyCellsPreDivision() { // step 2 : copy all cell rank and distance to its corresponding center with divide flag = 1 totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara.maxNodeOfOneCell; divAuxData.tmpIsActiveHold1 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpDistToCenter1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRankHold1 = thrust::device_vector<uint>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellTypes = thrust::device_vector<SceNodeType>( divAuxData.nodeStorageCount); divAuxData.tmpIsActiveHold2 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpDistToCenter2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRankHold1.begin(), divAuxData.tmpDistToCenter1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin(), divAuxData.tmpCellTypes.begin())), isTrue()); } /** * Performance-wise, this implementation is not the best, because a single sort_by_key * with a specialized comparison operator could be used instead. However, this implementation is more robust and won't * compromise performance too much.
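 *
 * A sketch of that single-sort alternative (illustrative only, not used here): use the
 * (cellRank, distToCenter) pair as the sort key and call thrust::sort_by_key once over
 * all dividing cells with a comparator that orders by cell rank first and distance second,
 * e.g.
 *   struct RankThenDist {
 *     __host__ __device__ bool operator()(const thrust::tuple<uint, double>& a,
 *                                         const thrust::tuple<uint, double>& b) const {
 *       if (thrust::get<0>(a) != thrust::get<0>(b))
 *         return thrust::get<0>(a) < thrust::get<0>(b);
 *       return thrust::get<1>(a) < thrust::get<1>(b);
 *     }
 *   };
 * With such a comparator the per-cell sort loop below could be collapsed into one call;
 * the loop is kept here for robustness.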
*/ void SceCells::sortNodesAccordingToDist() { //step 3 for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { thrust::sort_by_key( divAuxData.tmpDistToCenter1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpDistToCenter1.begin() + (i + 1) * allocPara.maxNodeOfOneCell, thrust::make_zip_iterator( thrust::make_tuple( divAuxData.tmpXValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpYValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpZValueHold1.begin() + i * allocPara.maxNodeOfOneCell))); } } /** * scatter_if() is a thrust function. * inputIter1 first, * inputIter1 last, * inputIter2 map, * inputIter3 stencil * randomAccessIter output */ void SceCells::copyLeftAndRightToSeperateArrays() { //step 4. thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), make_transform_iterator(countingBegin, LeftShiftFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, IsRightSide(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold2.begin(), divAuxData.tmpYValueHold2.begin(), divAuxData.tmpZValueHold2.begin()))); } void SceCells::transformIsActiveArrayOfBothArrays() { thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold1.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold2.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); if (divAuxData.toBeDivideCount != 0) { std::cout << "before insert, active cell count in nodes:" << nodes->getAllocPara().currentActiveCellCount << std::endl; } } void SceCells::addSecondArrayToCellArray() { /// step 6. 
call SceNodes function to add newly divided cells nodes->addNewlyDividedCells(divAuxData.tmpXValueHold2, divAuxData.tmpYValueHold2, divAuxData.tmpZValueHold2, divAuxData.tmpIsActiveHold2, divAuxData.tmpCellTypes); } void SceCells::copyFirstArrayToPreviousPos() { thrust::scatter( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.end(), divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple(countingBegin, divAuxData.tmpCellRankHold1.begin())), CompuPos(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells))); /** * after dividing, the cell should resume the initial * (1) node count, which defaults to be half size of max node count * (2) growth progress, which defaults to 0 * (3) last check point, which defaults to 0 */ thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)), thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)) + allocPara.currentActiveCellCount, countingBegin, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), isTrue()); // TODO: combine this one with the previous scatter_if to improve efficiency. thrust::fill( cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount + divAuxData.toBeDivideCount, allocPara.maxNodeOfOneCell / 2); } void SceCells::updateActiveCellCount() { allocPara.currentActiveCellCount = allocPara.currentActiveCellCount + divAuxData.toBeDivideCount; NodeAllocPara para = nodes->getAllocPara(); para.currentActiveCellCount = allocPara.currentActiveCellCount; nodes->setAllocPara(para); } void SceCells::markIsDivideFalse() { thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, false); } void SceCells::readMiscPara() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. 
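	// Worked example (illustrative; assumes the config sets MaxNodePerCell = 40):
	//   growThreshold = 1.0 / (40 - 40 / 2) + epsilon = 1.0 / 20 + epsilon ~= 0.05,
	// so one node is added for roughly every 5% of growth progress, and the epsilon keeps an
	// accumulated checkpoint value such as 0.99999999 (instead of exactly 1.0) from allowing
	// one extra node.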
int maxNodeOfOneCell = globalConfigVars.getConfigValue("MaxNodePerCell").toInt(); miscPara.growThreshold = 1.0 / (maxNodeOfOneCell - maxNodeOfOneCell / 2) + epsilon; } void SceCells::readMiscPara_M() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. int maxIntnlNodePerCell = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); miscPara.growThreshold = 1.0 / (maxIntnlNodePerCell - maxIntnlNodePerCell / 2) + epsilon; miscPara.prolifDecayCoeff = globalConfigVars.getConfigValue( "ProlifDecayCoeff").toDouble(); } void SceCells::readBioPara() { if (controlPara.simuType != Disc_M) { bioPara.cellInitLength = globalConfigVars.getConfigValue( "CellInitLength").toDouble(); std::cout << "break point 1 " << bioPara.cellInitLength << std::endl; std::cout.flush(); bioPara.cellFinalLength = globalConfigVars.getConfigValue( "CellFinalLength").toDouble(); std::cout << "break point 2 " << bioPara.cellFinalLength << std::endl; std::cout.flush(); bioPara.elongationCoefficient = globalConfigVars.getConfigValue( "ElongateCoefficient").toDouble(); std::cout << "break point 3 " << bioPara.elongationCoefficient << std::endl; std::cout.flush(); } if (controlPara.simuType == Beak) { std::cout << "break point 4 " << std::endl; std::cout.flush(); bioPara.chemoCoefficient = globalConfigVars.getConfigValue( "ChemoCoefficient").toDouble(); } //std::cin >> jj; } void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } /** * To run all the cell level logics. * First step we got center positions of cells. * Grow. */ void SceCells::runAllCellLevelLogicsDisc(double dt) { this->dt = dt; //std::cerr << "enter run all cell level logics" << std::endl; computeCenterPos(); //std::cerr << "after compute center position." << std::endl; if (nodes->getControlPara().controlSwitchs.stab == OFF) { growAtRandom(dt); //grow2DTwoRegions(dt, region1, region2); //std::cerr << "after grow cells" << std::endl; //distributeIsActiveInfo(); //std::cerr << "after distribute is active info." << std::endl; divide2DSimplified(); //std::cerr << "after divide 2D simplified." << std::endl; distributeIsActiveInfo(); //std::cerr << "after distribute is active info." 
<< std::endl; distributeCellGrowthProgress(); } allComponentsMove(); //std::cerr << "after all components move." << std::endl; } //Ali void SceCells::runAllCellLogicsDisc_M(double dt) { void SceCells::runAllCellLogicsDisc_M(double & dt, double Damp_Coef, double InitTimeStage, double timeRatio, double timeRatio_Crit_actomyo, double timeRatio_Crit_ECM, double timeRatio_Crit_Division, double volume_Increase_Target_Ratio, double volume_Increase_Scale, double postDivision_restorationRateScale, int cycle, double distFromNucleus_max, double distFromNucleus_min, double distFromNucleus_normalMax1,double distFromNucleus_normalMax2,double distFromNucleus_normalMax3, double distFromNucleus_normalMax_apical1, double distFromNucleus_normalMax_apical2, double distFromNucleus_normalMax_apical3, double percentage_before_timeRatio_Crit_Division_scaling, double growthProgressSpeed, int maxApicalBasalNodeNum, double maxLengthToAddMemNodes, double mitoRndActomyoStrengthScaling, double thresholdToIntroduceNewCell, double mitoticThreshold) { //Ali // double time_Ratio = current_Time/total_Time; // if (timeRatio == timeRatio_Crit_Division){ // std::cout<<"timeRatio = "<<timeRatio<<std::endl; // } // if (cycle < 0){ // divAuxData.cellRank_division = -1; // divAuxData.cellRank_division2 = -1;//allocPara_m.currentActiveCellCount; // } // else if (cycle >= 10){ // divAuxData.cellRank_division = -1; // divAuxData.cellRank_division2 = -1;//allocPara_m.currentActiveCellCount; // } // if (cycle == 0 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 31; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 1 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 25;//33; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 2 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 6;//27; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 3 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 10;//29; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 4 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 61;//35; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 5 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 30;//86; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 6 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 87; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 7 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 5;//88; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 8 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 42;//89; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 9 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 37;//90; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else{ // divAuxData.cellRank_division = -1; // divAuxData.cellRank_division2 = -1;//allocPara_m.currentActiveCellCount; // } #ifdef debugModeECM hipEvent_t start1, start2, start3, start4, start5, start6, start7, start8, start9, start10, start11, start12, start13, 
stop; float elapsedTime1, elapsedTime2, elapsedTime3, elapsedTime4, elapsedTime5, elapsedTime6, elapsedTime7 , elapsedTime8 ; float elapsedTime9, elapsedTime10, elapsedTime11, elapsedTime12, elapsedTime13 ; hipEventCreate(&start1); hipEventCreate(&start2); hipEventCreate(&start3); hipEventCreate(&start4); hipEventCreate(&start5); hipEventCreate(&start6); hipEventCreate(&start7); hipEventCreate(&start8); hipEventCreate(&start9); hipEventCreate(&start10); hipEventCreate(&start11); hipEventCreate(&start12); hipEventCreate(&start13); hipEventCreate(&stop); hipEventRecord(start1, 0); #endif // std::cout << " *** 1 ***" << endl; this->dt = dt; this->Damp_Coef=Damp_Coef ; //Ali this->InitTimeStage=InitTimeStage ; //A & A growthAuxData.prolifDecay = exp(-curTime * miscPara.prolifDecayCoeff); //cout<< "Current Time in simulation is: "<<curTime <<endl; growthAuxData.randomGrowthSpeedMin = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMax_Ori; bool cellPolar=true ; bool subCellPolar= true ; // std::cout << " *** 2 ***" << endl; if (curTime>500000) { // eCMPointerCells->SetIfECMIsRemoved(false) ; // isBasalActinPresent=false ; // nodes->SetApicalAdhPresence(true) ; } if (curTime==InitTimeStage) { lastPrintNucleus=10000000 ; //just a big number outputFrameNucleus=0 ; // computeInternalAvgPos_M(); nodes->isInitPhase=false ; // This bool variable is not active in the code anymore string uniqueSymbolOutput = globalConfigVars.getConfigValue("UniqueSymbol").toString(); std::string cSVFileName = "EnergyExportCell_" + uniqueSymbolOutput + ".CSV"; ofstream EnergyExportCell ; EnergyExportCell.open(cSVFileName.c_str() ); EnergyExportCell <<"curTime"<<","<<"totalMembrLinSpringEnergyCell" << "," <<"totalMembrBendSpringEnergyCell" <<"," << "totalNodeIIEnergyCell"<<"," <<"totalNodeIMEnergyCell"<<","<<"totalNodeEnergyCell"<< std::endl; // Adding cell cycle timer information here std::vector<double> growthProgressReinitialize; growthProgressReinitialize.push_back(-9999.9);//0.0163);//Cell0 growthProgressReinitialize.push_back(-9999.9);//0.0181);//Cell1 // Ver7 // growthProgressReinitialize.push_back( -99999.9); // growthProgressReinitialize.push_back( -99999.9); // growthProgressReinitialize.push_back( -0.58); // growthProgressReinitialize.push_back( -2.69); // growthProgressReinitialize.push_back( -1.2); // growthProgressReinitialize.push_back( 0.1100); // growthProgressReinitialize.push_back( -2.978); // growthProgressReinitialize.push_back( -1.0070); // growthProgressReinitialize.push_back( -2.8052); // growthProgressReinitialize.push_back( -2.0354); // growthProgressReinitialize.push_back( -1.1824); // growthProgressReinitialize.push_back( -0.6577); // growthProgressReinitialize.push_back( 0.6080); // growthProgressReinitialize.push_back( -1.0282); // growthProgressReinitialize.push_back( -0.2033); // growthProgressReinitialize.push_back( -2.34); // growthProgressReinitialize.push_back( 0.0127); // growthProgressReinitialize.push_back( -0.2035); // growthProgressReinitialize.push_back( -1.299); // growthProgressReinitialize.push_back( -2.703); // growthProgressReinitialize.push_back( -1.562); // growthProgressReinitialize.push_back( -2.750); // growthProgressReinitialize.push_back( -0.3286); // growthProgressReinitialize.push_back( -2.083); // growthProgressReinitialize.push_back( -2.79); // growthProgressReinitialize.push_back( -1.1567); // growthProgressReinitialize.push_back( -0.5034); // 
growthProgressReinitialize.push_back( -1.9003); // growthProgressReinitialize.push_back( 0.4964); // growthProgressReinitialize.push_back( -0.4520); // growthProgressReinitialize.push_back( -2.002); // growthProgressReinitialize.push_back( -1.000); // growthProgressReinitialize.push_back( -1.880); // growthProgressReinitialize.push_back( 0.3719); // growthProgressReinitialize.push_back( -0.7133); // growthProgressReinitialize.push_back( -1.172); // growthProgressReinitialize.push_back( 0.0251); // growthProgressReinitialize.push_back( -2.323); // growthProgressReinitialize.push_back( -1.960); // growthProgressReinitialize.push_back( -0.1294); // growthProgressReinitialize.push_back( 0.2848); // growthProgressReinitialize.push_back( -2.912); // growthProgressReinitialize.push_back( 0.2526); // growthProgressReinitialize.push_back( -2.165); // growthProgressReinitialize.push_back( -1.031); // growthProgressReinitialize.push_back( -0.7257); // growthProgressReinitialize.push_back( -2.087); // growthProgressReinitialize.push_back( -1.018); // growthProgressReinitialize.push_back( 0.0391); // growthProgressReinitialize.push_back( -2.1332); // growthProgressReinitialize.push_back( -3.2330); // growthProgressReinitialize.push_back( -0.3449); // growthProgressReinitialize.push_back( -2.0334); // growthProgressReinitialize.push_back( -0.0101); // growthProgressReinitialize.push_back( 0.4452); // growthProgressReinitialize.push_back( -2.013); // growthProgressReinitialize.push_back( 0.0002); // growthProgressReinitialize.push_back( -1.048); // growthProgressReinitialize.push_back( 0.2862); // growthProgressReinitialize.push_back( -9999.9); // growthProgressReinitialize.push_back( -9999.9); growthProgressReinitialize.push_back( -9999.9); //Cell 2 growthProgressReinitialize.push_back( -9999.9); //Cell 3 // //Ver8 // std::cout<<"growthProgress initial profile Ver. 
8"<<std::endl; // growthProgressReinitialize.push_back( -1.0825); // growthProgressReinitialize.push_back( -3.4494); // growthProgressReinitialize.push_back( -2.0672); // growthProgressReinitialize.push_back( -2.8107); // growthProgressReinitialize.push_back( -0.1243); // growthProgressReinitialize.push_back( -2.1773); // growthProgressReinitialize.push_back( -1.2537); // growthProgressReinitialize.push_back( -2.7960); // growthProgressReinitialize.push_back( -0.9416); // growthProgressReinitialize.push_back( -2.3824); // growthProgressReinitialize.push_back( -0.7202); // growthProgressReinitialize.push_back( -1.5852); // growthProgressReinitialize.push_back( -3.1438); // growthProgressReinitialize.push_back( -2.5268); // growthProgressReinitialize.push_back( 0.3817); // growthProgressReinitialize.push_back( -2.8524); // growthProgressReinitialize.push_back( 0.0097); // growthProgressReinitialize.push_back( -1.2120); // growthProgressReinitialize.push_back( 0.7336); // growthProgressReinitialize.push_back( -3.1678); // growthProgressReinitialize.push_back( -1.6186); // growthProgressReinitialize.push_back( -3.0467); // growthProgressReinitialize.push_back( 0.5881); // growthProgressReinitialize.push_back( -3.4803); // growthProgressReinitialize.push_back( -0.2066); // growthProgressReinitialize.push_back( -3.1411); // growthProgressReinitialize.push_back( -1.8009); // growthProgressReinitialize.push_back( -2.3956); // growthProgressReinitialize.push_back( -0.0997); // growthProgressReinitialize.push_back( -1.6665); // growthProgressReinitialize.push_back( 0.3703); // growthProgressReinitialize.push_back( -2.7272); // growthProgressReinitialize.push_back( 0.1945); // growthProgressReinitialize.push_back( -1.0363); // growthProgressReinitialize.push_back( -2.8839); // growthProgressReinitialize.push_back( 0.1254); // growthProgressReinitialize.push_back( -0.8563); // growthProgressReinitialize.push_back( -2.0085); // growthProgressReinitialize.push_back( -1.3187); // growthProgressReinitialize.push_back( -3.1771); // growthProgressReinitialize.push_back( -2.4804); // growthProgressReinitialize.push_back( -1.7266); // growthProgressReinitialize.push_back( -3.2890); // growthProgressReinitialize.push_back( 0.3365); // growthProgressReinitialize.push_back( -1.4138); // growthProgressReinitialize.push_back( -2.0647); // growthProgressReinitialize.push_back( 0.3252); // growthProgressReinitialize.push_back( -1.9307); // growthProgressReinitialize.push_back( -3.0274); // growthProgressReinitialize.push_back( -0.1839); // growthProgressReinitialize.push_back( -1.8436); // growthProgressReinitialize.push_back( -2.4728); // growthProgressReinitialize.push_back( -1.7834); // growthProgressReinitialize.push_back( -3.0901); // growthProgressReinitialize.push_back( 0.5037); // growthProgressReinitialize.push_back( -1.0554); // growthProgressReinitialize.push_back( -3.2459); // //Ver9 // std::cout<<"growthProgress initial profile Ver. 
9"<<std::endl; // growthProgressReinitialize.push_back(-1.3519); // growthProgressReinitialize.push_back(-0.2639); // growthProgressReinitialize.push_back(-3.1180); // growthProgressReinitialize.push_back(0.7373); // growthProgressReinitialize.push_back(-1.8542); // growthProgressReinitialize.push_back(-0.4155); // growthProgressReinitialize.push_back(-3.1281); // growthProgressReinitialize.push_back(0.6909); // growthProgressReinitialize.push_back(-1.9936); // growthProgressReinitialize.push_back(-0.6243); // growthProgressReinitialize.push_back(-3.1490); // growthProgressReinitialize.push_back(0.7337); // growthProgressReinitialize.push_back(-1.9932); // growthProgressReinitialize.push_back(-0.4141); // growthProgressReinitialize.push_back(-3.4583); // growthProgressReinitialize.push_back(0.7396); // growthProgressReinitialize.push_back(-1.4360); // growthProgressReinitialize.push_back(-0.3950); // growthProgressReinitialize.push_back(-3.3643); // growthProgressReinitialize.push_back(0.6351); // growthProgressReinitialize.push_back(-1.9657); // growthProgressReinitialize.push_back(-0.8638); // growthProgressReinitialize.push_back(-3.4182); // growthProgressReinitialize.push_back(0.6429); // growthProgressReinitialize.push_back(-2.3591); // growthProgressReinitialize.push_back(-0.3721); // growthProgressReinitialize.push_back(-3.4084); // growthProgressReinitialize.push_back(0.6688); // growthProgressReinitialize.push_back(-2.2819); // growthProgressReinitialize.push_back(-0.9605); // growthProgressReinitialize.push_back(-3.3901); // growthProgressReinitialize.push_back(0.4828); // growthProgressReinitialize.push_back(-2.2890); // growthProgressReinitialize.push_back(-0.6388); // growthProgressReinitialize.push_back(-3.4209); // growthProgressReinitialize.push_back(0.5414); // growthProgressReinitialize.push_back(-1.7997); // growthProgressReinitialize.push_back(-0.5446); // growthProgressReinitialize.push_back(-2.8349); // growthProgressReinitialize.push_back(0.5172); // growthProgressReinitialize.push_back(-1.5476); // growthProgressReinitialize.push_back(-0.4978); // growthProgressReinitialize.push_back(-3.1851); // growthProgressReinitialize.push_back(0.5574); // growthProgressReinitialize.push_back(-1.6700); // growthProgressReinitialize.push_back(-0.6098); // growthProgressReinitialize.push_back(-3.3590); // growthProgressReinitialize.push_back(0.5648); // growthProgressReinitialize.push_back(-1.9684); // growthProgressReinitialize.push_back(-0.5266); // growthProgressReinitialize.push_back(-2.9763); // growthProgressReinitialize.push_back(0.5215); // growthProgressReinitialize.push_back(-1.8311); // growthProgressReinitialize.push_back(-0.6464); // growthProgressReinitialize.push_back(-3.2580); // growthProgressReinitialize.push_back(0.6640); // growthProgressReinitialize.push_back(-2.0480); // //Ver10 // std::cout<<"Growth-progress profile Ver 10 is used"<<std::endl; // growthProgressReinitialize.push_back( -1.9657); // growthProgressReinitialize.push_back( -0.9376); // growthProgressReinitialize.push_back( 0.3190); // growthProgressReinitialize.push_back( -2.3226); // growthProgressReinitialize.push_back( -0.9976); // growthProgressReinitialize.push_back( 0.6312); // growthProgressReinitialize.push_back( -2.4786); // growthProgressReinitialize.push_back( -0.4733); // growthProgressReinitialize.push_back( 0.6356); // growthProgressReinitialize.push_back( -2.4616); // growthProgressReinitialize.push_back( -1.3734); // growthProgressReinitialize.push_back( -0.0702); // 
growthProgressReinitialize.push_back( -2.4089); // growthProgressReinitialize.push_back( -1.4015); // growthProgressReinitialize.push_back( -0.0385); // growthProgressReinitialize.push_back( -2.4974); // growthProgressReinitialize.push_back( -1.2876); // growthProgressReinitialize.push_back( 0.1130); // growthProgressReinitialize.push_back( -2.4161); // growthProgressReinitialize.push_back( -1.3892); // growthProgressReinitialize.push_back( -0.0828); // growthProgressReinitialize.push_back( -2.4240); // growthProgressReinitialize.push_back( -1.1015); // growthProgressReinitialize.push_back( 0.0612); // growthProgressReinitialize.push_back( -2.2057); // growthProgressReinitialize.push_back( -1.1241); // growthProgressReinitialize.push_back( 0.7000); // growthProgressReinitialize.push_back( -2.2458); // growthProgressReinitialize.push_back( -1.2208); // growthProgressReinitialize.push_back( -0.1874); // growthProgressReinitialize.push_back( -2.2313); // growthProgressReinitialize.push_back( -1.2195); // growthProgressReinitialize.push_back( 0.1352); // growthProgressReinitialize.push_back( -2.3796); // growthProgressReinitialize.push_back( -0.9151); // growthProgressReinitialize.push_back( 0.7106); // growthProgressReinitialize.push_back( -1.9618); // growthProgressReinitialize.push_back( -0.7738); // growthProgressReinitialize.push_back( 0.4064); // growthProgressReinitialize.push_back( -1.7924); // growthProgressReinitialize.push_back( -0.6859); // growthProgressReinitialize.push_back( 0.5891); // growthProgressReinitialize.push_back( -1.9121); // growthProgressReinitialize.push_back( -0.5693); // growthProgressReinitialize.push_back( 0.7430); // growthProgressReinitialize.push_back( -2.2218); // growthProgressReinitialize.push_back( -1.0864); // growthProgressReinitialize.push_back( 0.1956); // growthProgressReinitialize.push_back( -2.1195); // growthProgressReinitialize.push_back( -0.8659); // growthProgressReinitialize.push_back( 0.2405); // growthProgressReinitialize.push_back( -2.4792); // growthProgressReinitialize.push_back( -1.1303); // growthProgressReinitialize.push_back( 0.3634); // growthProgressReinitialize.push_back( -2.2099); // growthProgressReinitialize.push_back( -1.0004); // growthProgressReinitialize.push_back( 0.2268); // growthProgressReinitialize.push_back( -9999.9); // growthProgressReinitialize.push_back( -9999.9); // growthProgressReinitialize.push_back(-9999.9);//0.0140); // growthProgressReinitialize.push_back(-9999.9);//0.0178); // growthProgressReinitialize.push_back(-9999.9);//0.0192); // growthProgressReinitialize.push_back(-9999.9);//0.0109); // growthProgressReinitialize.push_back(-9999.9);//0.0028); // growthProgressReinitialize.push_back(-9999.9);//0.0030); // growthProgressReinitialize.push_back(-9999.9);//0.0052); // growthProgressReinitialize.push_back(-9999.9);//0.0168); // growthProgressReinitialize.push_back(-9999.9);//0.0051); // growthProgressReinitialize.push_back(-9999.9);//0.0163); // growthProgressReinitialize.push_back(-9999.9);//0.0049); // growthProgressReinitialize.push_back(-9999.9);//0.0186); // growthProgressReinitialize.push_back(-9999.9);//0.0070); // growthProgressReinitialize.push_back(-9999.9);//0.0039); // growthProgressReinitialize.push_back(-9999.9);//0.0050); // growthProgressReinitialize.push_back(-9999.9);//0.0123); // growthProgressReinitialize.push_back(-9999.9);//0.0095); // growthProgressReinitialize.push_back(-9999.9);//0.0070); // growthProgressReinitialize.push_back(-9999.9);//0.0166); // 
growthProgressReinitialize.push_back(-9999.9);//0.0117); // growthProgressReinitialize.push_back(-9999.9);//0.0110); // growthProgressReinitialize.push_back(-9999.9);//0.0183); // growthProgressReinitialize.push_back(-9999.9);//0.0057); // for (int w = 0; w < allocPara_m.maxCellCount; w++){ // if (w < allocPara_m.currentActiveCellCount){ // cellInfoVecs.growthProgress[w] = growthProgressReinitialize[w]; // } // else{ // cellInfoVecs.growthProgress[w] = 0.0; // } // } // //Ver11 // std::cout<<"Growth-progress profile Ver 11 is used"<<std::endl; // growthProgressReinitialize.push_back( 0.8821); // growthProgressReinitialize.push_back( -0.5595); // growthProgressReinitialize.push_back( -0.0401); // growthProgressReinitialize.push_back( 0.4962); // growthProgressReinitialize.push_back( -0.6362); // growthProgressReinitialize.push_back( -0.0459); // growthProgressReinitialize.push_back( 0.6200); // growthProgressReinitialize.push_back( -0.6327); // growthProgressReinitialize.push_back( -0.0860); // growthProgressReinitialize.push_back( 0.6928); // growthProgressReinitialize.push_back( -0.7430); // growthProgressReinitialize.push_back( -0.1503); // growthProgressReinitialize.push_back( 0.4501); // growthProgressReinitialize.push_back( -0.6755); // growthProgressReinitialize.push_back( -0.1654); // growthProgressReinitialize.push_back( 0.6631); // growthProgressReinitialize.push_back( -0.7269); // growthProgressReinitialize.push_back( -0.0736); // growthProgressReinitialize.push_back( 0.6441); // growthProgressReinitialize.push_back( -0.5833); // growthProgressReinitialize.push_back( 0.1423); // growthProgressReinitialize.push_back( 0.6433); // growthProgressReinitialize.push_back( -0.3941); // growthProgressReinitialize.push_back( 0.1151); // growthProgressReinitialize.push_back( 0.7180); // growthProgressReinitialize.push_back( -0.6606); // growthProgressReinitialize.push_back( 0.0325); // growthProgressReinitialize.push_back( 0.8795); // growthProgressReinitialize.push_back( -0.6248); // growthProgressReinitialize.push_back( 0.0403); // growthProgressReinitialize.push_back( 0.7261); // growthProgressReinitialize.push_back( -0.4992); // growthProgressReinitialize.push_back( 0.0718); // growthProgressReinitialize.push_back( 0.6641); // growthProgressReinitialize.push_back( -0.4828); // growthProgressReinitialize.push_back( 0.0918); // growthProgressReinitialize.push_back( 0.7225); // growthProgressReinitialize.push_back( -0.5154); // growthProgressReinitialize.push_back( 0.2171); // growthProgressReinitialize.push_back( 0.7240); // growthProgressReinitialize.push_back( -0.5883); // growthProgressReinitialize.push_back( 0.1415); // growthProgressReinitialize.push_back( 0.7030); // growthProgressReinitialize.push_back( -0.5916); // growthProgressReinitialize.push_back( 0.1664); // growthProgressReinitialize.push_back( 0.6865); // growthProgressReinitialize.push_back( -0.3889); // growthProgressReinitialize.push_back( 0.1533); // growthProgressReinitialize.push_back( 0.6678); // growthProgressReinitialize.push_back( -0.4366); // growthProgressReinitialize.push_back( 0.1610); // growthProgressReinitialize.push_back( 0.7406); // growthProgressReinitialize.push_back( -0.4220); // growthProgressReinitialize.push_back( 0.1918); // growthProgressReinitialize.push_back( 0.7091); // growthProgressReinitialize.push_back( -0.4620); // growthProgressReinitialize.push_back( 0.0381); // growthProgressReinitialize.push_back( -9999.9); // growthProgressReinitialize.push_back( -9999.9); //Ver11 
std::cout<<"Growth-progress profile Ver 11 is used"<<std::endl; growthProgressReinitialize.push_back( -9999.9);//Cell4 growthProgressReinitialize.push_back( -0.5595);//Cell5 growthProgressReinitialize.push_back( -0.0401);//Cell6 growthProgressReinitialize.push_back( 0.4962);//Cell7 growthProgressReinitialize.push_back( -0.6362);//Cell8 growthProgressReinitialize.push_back( -0.0459);//Cell9 growthProgressReinitialize.push_back( 0.6200);//Cell10 growthProgressReinitialize.push_back( -0.6327);//Cell11 growthProgressReinitialize.push_back( -0.0860);//Cell12 growthProgressReinitialize.push_back( 0.6928);//Cell13 growthProgressReinitialize.push_back( -0.7430);//Cell14 growthProgressReinitialize.push_back( -0.1503);//Cell15 growthProgressReinitialize.push_back( 0.4501);//Cell16 growthProgressReinitialize.push_back( -0.6755);//Cell17 growthProgressReinitialize.push_back( -0.1654);//Cell18 growthProgressReinitialize.push_back( 0.6631);//Cell19 growthProgressReinitialize.push_back( -0.7269);//Cell20 growthProgressReinitialize.push_back( -0.0736);//Cell21 growthProgressReinitialize.push_back( 0.6441);//Cell22 growthProgressReinitialize.push_back( -0.5833);//Cell23 growthProgressReinitialize.push_back( 0.1423);//Cell24 growthProgressReinitialize.push_back( 0.6433);//Cell25 growthProgressReinitialize.push_back( -0.3941);//Cell26 growthProgressReinitialize.push_back( 0.1151);//Cell27 growthProgressReinitialize.push_back( 0.7180);//Cell28 growthProgressReinitialize.push_back( -0.6606);//Cell29 growthProgressReinitialize.push_back( 0.0325);//Cell30 growthProgressReinitialize.push_back( 0.8900);//Cell31 growthProgressReinitialize.push_back( -0.6248);//Cell32 growthProgressReinitialize.push_back( 0.0403);//Cell33 growthProgressReinitialize.push_back( 0.7261);//Cell34 growthProgressReinitialize.push_back( -0.4992);//Cell35 growthProgressReinitialize.push_back( 0.0718);//Cell36 growthProgressReinitialize.push_back( 0.6641);//Cell37 growthProgressReinitialize.push_back( -0.4828);//Cell38 growthProgressReinitialize.push_back( 0.0918);//Cell39 growthProgressReinitialize.push_back( 0.7225);//Cell40 growthProgressReinitialize.push_back( -0.5154);//Cell41 growthProgressReinitialize.push_back( 0.2171);//Cell42 growthProgressReinitialize.push_back( 0.7240);//Cell43 growthProgressReinitialize.push_back( -0.5883);//Cell44 growthProgressReinitialize.push_back( 0.1415);//Cell45 growthProgressReinitialize.push_back( 0.7030);//Cell46 growthProgressReinitialize.push_back( -0.5916);//Cell47 growthProgressReinitialize.push_back( 0.1664);//Cell48 growthProgressReinitialize.push_back( 0.6865);//Cell49 growthProgressReinitialize.push_back( -0.3889);//Cell50 growthProgressReinitialize.push_back( 0.1533);//Cell51 growthProgressReinitialize.push_back( 0.6678);//Cell52 growthProgressReinitialize.push_back( -0.4366);//Cell53 growthProgressReinitialize.push_back( 0.1610);//Cell54 growthProgressReinitialize.push_back( 0.7406);//Cell55 growthProgressReinitialize.push_back( -0.4220);//Cell56 growthProgressReinitialize.push_back( 0.1918);//Cell57 growthProgressReinitialize.push_back( 0.7091);//Cell58 growthProgressReinitialize.push_back( -0.4620);//Cell59 growthProgressReinitialize.push_back( 0.0381);//Cell60 growthProgressReinitialize.push_back( -9999.9);//Cell61 growthProgressReinitialize.push_back( -9999.9);//Cell62 growthProgressReinitialize.push_back(-9999.9);//0.0140); growthProgressReinitialize.push_back(-9999.9);//0.0178); growthProgressReinitialize.push_back(-9999.9);//0.0192); 
growthProgressReinitialize.push_back(-9999.9);//0.0109); growthProgressReinitialize.push_back(-9999.9);//0.0028); growthProgressReinitialize.push_back(-9999.9);//0.0030); growthProgressReinitialize.push_back(-9999.9);//0.0052); growthProgressReinitialize.push_back(-9999.9);//0.0168); growthProgressReinitialize.push_back(-9999.9);//0.0051); growthProgressReinitialize.push_back(-9999.9);//0.0163); growthProgressReinitialize.push_back(-9999.9);//0.0049); growthProgressReinitialize.push_back(-9999.9);//0.0186); growthProgressReinitialize.push_back(-9999.9);//0.0070); growthProgressReinitialize.push_back(-9999.9);//0.0039); growthProgressReinitialize.push_back(-9999.9);//0.0050); growthProgressReinitialize.push_back(-9999.9);//0.0123); growthProgressReinitialize.push_back(-9999.9);//0.0095); growthProgressReinitialize.push_back(-9999.9);//0.0070); growthProgressReinitialize.push_back(-9999.9);//0.0166); growthProgressReinitialize.push_back(-9999.9);//0.0117); growthProgressReinitialize.push_back(-9999.9);//0.0110); growthProgressReinitialize.push_back(-9999.9);//0.0183); growthProgressReinitialize.push_back(-9999.9);//0.0057); for (int w = 0; w < allocPara_m.maxCellCount; w++){ if (w < allocPara_m.currentActiveCellCount){ cellInfoVecs.growthProgress[w] = growthProgressReinitialize[w]; } else{ cellInfoVecs.growthProgress[w] = 0.0; } } // Adding actin level (number of contractile springs) information here std::vector<double> distFromNucleus_normalMaxVec; distFromNucleus_normalMaxVec.push_back(-9999.9999);//CellID0 distFromNucleus_normalMaxVec.push_back(-9999.9999);//CellID1 distFromNucleus_normalMaxVec.push_back(0.197835294); distFromNucleus_normalMaxVec.push_back(0.197882353); distFromNucleus_normalMaxVec.push_back(0.216423529); distFromNucleus_normalMaxVec.push_back(0.2248 ); distFromNucleus_normalMaxVec.push_back(0.223623529); distFromNucleus_normalMaxVec.push_back(0.248752941); distFromNucleus_normalMaxVec.push_back(0.251717647); distFromNucleus_normalMaxVec.push_back(0.281788235); distFromNucleus_normalMaxVec.push_back(0.284329412); distFromNucleus_normalMaxVec.push_back(0.284094118); distFromNucleus_normalMaxVec.push_back(0.282776471); distFromNucleus_normalMaxVec.push_back(0.2968 ); distFromNucleus_normalMaxVec.push_back(0.299294118); distFromNucleus_normalMaxVec.push_back(0.3 ); distFromNucleus_normalMaxVec.push_back(0.292658824); distFromNucleus_normalMaxVec.push_back(0.292329412); distFromNucleus_normalMaxVec.push_back(0.284847059); distFromNucleus_normalMaxVec.push_back(0.292564706); distFromNucleus_normalMaxVec.push_back(0.274823529); distFromNucleus_normalMaxVec.push_back(0.242776471); distFromNucleus_normalMaxVec.push_back(0.247529412); distFromNucleus_normalMaxVec.push_back(0.254682353); distFromNucleus_normalMaxVec.push_back(0.224658824); distFromNucleus_normalMaxVec.push_back(0.234776471); distFromNucleus_normalMaxVec.push_back(0.263105882); distFromNucleus_normalMaxVec.push_back(0.236988235); distFromNucleus_normalMaxVec.push_back(0.216423529); distFromNucleus_normalMaxVec.push_back(0.240658824); distFromNucleus_normalMaxVec.push_back(0.196941176); distFromNucleus_normalMaxVec.push_back(0.2184 ); distFromNucleus_normalMaxVec.push_back(0.232094118); distFromNucleus_normalMaxVec.push_back(0.238070588); distFromNucleus_normalMaxVec.push_back(0.248329412); distFromNucleus_normalMaxVec.push_back(0.244611765); distFromNucleus_normalMaxVec.push_back(0.230070588); distFromNucleus_normalMaxVec.push_back(0.221411765); distFromNucleus_normalMaxVec.push_back(0.2352 ); 
distFromNucleus_normalMaxVec.push_back(0.198305882); distFromNucleus_normalMaxVec.push_back(0.201552941); distFromNucleus_normalMaxVec.push_back(0.2176 ); distFromNucleus_normalMaxVec.push_back(0.210964706); distFromNucleus_normalMaxVec.push_back(0.233082353); distFromNucleus_normalMaxVec.push_back(0.228141176); distFromNucleus_normalMaxVec.push_back(0.236564706); distFromNucleus_normalMaxVec.push_back(0.244141176); distFromNucleus_normalMaxVec.push_back(0.2232 ); distFromNucleus_normalMaxVec.push_back(0.229741176); distFromNucleus_normalMaxVec.push_back(0.220847059); distFromNucleus_normalMaxVec.push_back(0.214164706); distFromNucleus_normalMaxVec.push_back(0.212376471); distFromNucleus_normalMaxVec.push_back(0.218870588); distFromNucleus_normalMaxVec.push_back(0.245129412); distFromNucleus_normalMaxVec.push_back(0.230964706); distFromNucleus_normalMaxVec.push_back(0.252047059); distFromNucleus_normalMaxVec.push_back(0.243576471); distFromNucleus_normalMaxVec.push_back(0.254870588); distFromNucleus_normalMaxVec.push_back(0.245976471); distFromNucleus_normalMaxVec.push_back(0.209270588); distFromNucleus_normalMaxVec.push_back(0.195529412); distFromNucleus_normalMaxVec.push_back(0.194588235); distFromNucleus_normalMaxVec.push_back(0.215482353);//CellID62 std::vector<double> distFromNucleus_normalMaxVec_apical; distFromNucleus_normalMaxVec_apical.push_back(-9999.9999);//CellID0 distFromNucleus_normalMaxVec_apical.push_back(-9999.9999);//CellID1 distFromNucleus_normalMaxVec_apical.push_back(0.260141176); distFromNucleus_normalMaxVec_apical.push_back(0.255152941); distFromNucleus_normalMaxVec_apical.push_back(0.236658824); distFromNucleus_normalMaxVec_apical.push_back(0.236470588); distFromNucleus_normalMaxVec_apical.push_back(0.242776471); distFromNucleus_normalMaxVec_apical.push_back(0.2408 ); distFromNucleus_normalMaxVec_apical.push_back(0.245835294); distFromNucleus_normalMaxVec_apical.push_back(0.250211765); distFromNucleus_normalMaxVec_apical.push_back(0.257741176); distFromNucleus_normalMaxVec_apical.push_back(0.248423529); distFromNucleus_normalMaxVec_apical.push_back(0.251058824); distFromNucleus_normalMaxVec_apical.push_back(0.247623529); distFromNucleus_normalMaxVec_apical.push_back(0.279058824); distFromNucleus_normalMaxVec_apical.push_back(0.276235294); distFromNucleus_normalMaxVec_apical.push_back(0.261505882); distFromNucleus_normalMaxVec_apical.push_back(0.266117647); distFromNucleus_normalMaxVec_apical.push_back(0.245788235); distFromNucleus_normalMaxVec_apical.push_back(0.249270588); distFromNucleus_normalMaxVec_apical.push_back(0.253082353); distFromNucleus_normalMaxVec_apical.push_back(0.253694118); distFromNucleus_normalMaxVec_apical.push_back(0.277223529); distFromNucleus_normalMaxVec_apical.push_back(0.261270588); distFromNucleus_normalMaxVec_apical.push_back(0.252376471); distFromNucleus_normalMaxVec_apical.push_back(0.259341176); distFromNucleus_normalMaxVec_apical.push_back(0.245223529); distFromNucleus_normalMaxVec_apical.push_back(0.251435294); distFromNucleus_normalMaxVec_apical.push_back(0.262635294); distFromNucleus_normalMaxVec_apical.push_back(0.297552941); distFromNucleus_normalMaxVec_apical.push_back(0.276564706); distFromNucleus_normalMaxVec_apical.push_back(0.283529412); distFromNucleus_normalMaxVec_apical.push_back(0.289552941); distFromNucleus_normalMaxVec_apical.push_back(0.272141176); distFromNucleus_normalMaxVec_apical.push_back(0.2424 ); distFromNucleus_normalMaxVec_apical.push_back(0.243623529); 
distFromNucleus_normalMaxVec_apical.push_back(0.188235294); distFromNucleus_normalMaxVec_apical.push_back(0.257835294); distFromNucleus_normalMaxVec_apical.push_back(0.245223529); distFromNucleus_normalMaxVec_apical.push_back(0.226682353); distFromNucleus_normalMaxVec_apical.push_back(0.208282353); distFromNucleus_normalMaxVec_apical.push_back(0.211388235); distFromNucleus_normalMaxVec_apical.push_back(0.192470588); distFromNucleus_normalMaxVec_apical.push_back(0.1944 ); distFromNucleus_normalMaxVec_apical.push_back(0.194117647); distFromNucleus_normalMaxVec_apical.push_back(0.227623529); distFromNucleus_normalMaxVec_apical.push_back(0.224894118); distFromNucleus_normalMaxVec_apical.push_back(0.242917647); distFromNucleus_normalMaxVec_apical.push_back(0.215152941); distFromNucleus_normalMaxVec_apical.push_back(0.208423529); distFromNucleus_normalMaxVec_apical.push_back(0.208941176); distFromNucleus_normalMaxVec_apical.push_back(0.195717647); distFromNucleus_normalMaxVec_apical.push_back(0.204094118); distFromNucleus_normalMaxVec_apical.push_back(0.183764706); distFromNucleus_normalMaxVec_apical.push_back(0.217223529); distFromNucleus_normalMaxVec_apical.push_back(0.222870588); distFromNucleus_normalMaxVec_apical.push_back(0.226917647); distFromNucleus_normalMaxVec_apical.push_back(0.213223529); distFromNucleus_normalMaxVec_apical.push_back(0.221458824); distFromNucleus_normalMaxVec_apical.push_back(0.207011765); distFromNucleus_normalMaxVec_apical.push_back(0.201035294); distFromNucleus_normalMaxVec_apical.push_back(0.191858824); distFromNucleus_normalMaxVec_apical.push_back(0.191576471);//CellID62 for (int cellRank = 0; cellRank < allocPara_m.maxCellCount; cellRank++){ if (cellRank < allocPara_m.currentActiveCellCount){ // cellInfoVecs.distFromNucleus_normal[cellRank] = distFromNucleus_normalMax;//(distFromNucleus_normalMax) - (distFromNucleus_normalMax - (-14.0))*(1.0 - pow(distFromNucleus_normalVec[cellRank], 3.0)); if (cellRank >= 2 && cellRank <= 21){ cellInfoVecs.distFromNucleus_normal[cellRank] = 1.0*distFromNucleus_normalMaxVec[cellRank]; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 1.0*distFromNucleus_normalMaxVec_apical[cellRank]; if (cellRank == 2){ std::cout<<"cellInfoVecs.distFromNucleus_normal["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal[cellRank]<<std::endl; std::cout<<"cellInfoVecs.distFromNucleus_normal_apical["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal_apical[cellRank]<<std::endl; } } else if (cellRank >= 22 && cellRank <= 42){ cellInfoVecs.distFromNucleus_normal[cellRank] = (1.0)*distFromNucleus_normalMaxVec[cellRank]; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 1.0*distFromNucleus_normalMaxVec_apical[cellRank]; if (cellRank == 22){ std::cout<<"cellInfoVecs.distFromNucleus_normal["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal[cellRank]<<std::endl; std::cout<<"cellInfoVecs.distFromNucleus_normal_apical["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal_apical[cellRank]<<std::endl; } } else if (cellRank >= 43 && cellRank <= 62){ cellInfoVecs.distFromNucleus_normal[cellRank] = 1.0*distFromNucleus_normalMaxVec[cellRank]; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 1.0*distFromNucleus_normalMaxVec_apical[cellRank]; if (cellRank == 43){ std::cout<<"cellInfoVecs.distFromNucleus_normal["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal[cellRank]<<std::endl; std::cout<<"cellInfoVecs.distFromNucleus_normal_apical["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal_apical[cellRank]<<std::endl; } } 
else{ cellInfoVecs.distFromNucleus_normal[cellRank] = -99999.9; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 99999.9; // std::cout<<"infoVecs.contractActomyo_multip["<<i<<"] = "<<infoVecs.contractActomyo_multip[i]<<std::endl; } } else{ cellInfoVecs.distFromNucleus_normal[cellRank] = -99999.9; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 99999.9; } } } curTime = curTime + dt; bool tmpIsInitPhase= nodes->isInitPhase ; // std::cout << " *** 3 ***" << endl; if (nodes->isMemNodeTypeAssigned==false) { assignMemNodeType(); // Ali cout << " I assigned boolean values for membrane node types " << endl; nodes->isMemNodeTypeAssigned=true ; } if (nodes->isMemNodeTypeAssigned_postCellDivision==false){ cout<<" I begin to reassign boolean values for membrane node types post-growth"<<endl; assignMemNodeType(); cout<<" I reassigned boolean values for membrane node types post-growth"<<endl; nodes->isMemNodeTypeAssigned_postCellDivision=true; } if (nodes->isMemNodeTypeAssigned_postAddNode==false){ cout<<" I begin to reassign boolean values for membrane node types post-growth"<<endl; assignMemNodeType(); cout<<" I reassigned boolean values for membrane node types post-growth"<<endl; nodes->isMemNodeTypeAssigned_postAddNode=true; } // std::cout << " *** 4 ***" << endl; #ifdef debugModeECM hipEventRecord(start2, 0); hipEventSynchronize(start2); hipEventElapsedTime(&elapsedTime1, start1, start2); #endif double contractileSpringGrowthProgressSpeed = 0.5*9.3405e-5;//9.3405e-5;//For minM14max4 //5.7081e-5;//For minM7max4 //7.2649e-05;//For minM7max7 double cellAreaGrowthProgressSpeed = 0.00022;//0.00011;//0.5*9.984e-5;//9.984e-5;//0.00002088;//0.02*0.000075/(4.0); double cellAreaGrowthProgressNonMitoticSpeed = 0.5*9.984e-5;//9.984e-5;//0.00002088;//0.02*0.000075/(4.0); double distFromBasalLocSpeed = 0.0001;//0.00005; double distFromApicalLocSpeed = 0.0001;//0.00005; if (timeRatio == 0){ std::cout<<"contractileSpringGrowthProgressSpeed = "<<contractileSpringGrowthProgressSpeed<<std::endl; std::cout<<"cellAreaGrowthProgressSpeed = "<<cellAreaGrowthProgressSpeed<<std::endl; std::cout<<"cellAreaGrowthProgressNonMitoticSpeed = "<<cellAreaGrowthProgressNonMitoticSpeed<<std::endl; std::cout<<"Calculating the number of apical node and basal node in each cell"<<std::endl; for (int i = 0; i < allocPara_m.maxCellCount*allocPara_m.maxAllNodePerCell; i++){ int cellRank = i/allocPara_m.maxAllNodePerCell; if (nodes->getInfoVecs().memNodeType1[i] == apical1){ cellInfoVecs.numApicalVec[cellRank] += 1; } if (nodes->getInfoVecs().memNodeType1[i] == basal1){ cellInfoVecs.numBasalVec[cellRank] += 1; } } for (int i = 0; i < allocPara_m.maxCellCount; i++){ cellInfoVecs.cellRankVec[i] = i; std::cout<<"Cell["<<i<<"] has "<<cellInfoVecs.numApicalVec[i]<<" apical nodes and "<<cellInfoVecs.numBasalVec[i]<<" basal nodes"<<std::endl; } } for (int w = 0; w < allocPara_m.currentActiveCellCount; w++){ if (cellInfoVecs.isEnteringMitotic[w] == true){ cellInfoVecs.contractileSpringGrowthProgress[w] += contractileSpringGrowthProgressSpeed;//0.00015; cellInfoVecs.cellAreaGrowthProgress[w] += cellAreaGrowthProgressSpeed; //0.00005; cellInfoVecs.cellAreaGrowthProgressNonMitotic[w] += cellAreaGrowthProgressNonMitoticSpeed;//0.00005; } else{ cellInfoVecs.contractileSpringGrowthProgress[w] = 0.0; cellInfoVecs.cellAreaGrowthProgressNonMitotic[w] += cellAreaGrowthProgressNonMitoticSpeed;//0.00005; cellInfoVecs.cellAreaGrowthProgress[w] += cellAreaGrowthProgressSpeed;//0.00005; cellInfoVecs.distFromBasalLoc[w] += distFromBasalLocSpeed; 
cellInfoVecs.distFromApicalLoc[w] += distFromApicalLocSpeed; // if (w == 86){ // std::cout<<"cellInfoVecs.distFromNucleus_normal[w] : " <<cellInfoVecs.distFromNucleus_normal[w]<<std::endl; // std::cout<<"cellInfoVecs.distFromNucleus_normal_apical[w] : "<<cellInfoVecs.distFromNucleus_normal_apical[w]<<std::endl; // std::cout<<"distFromNucleus_normal[w]*_individualCellHeight[w] : "<<cellInfoVecs.distFromNucleus_normal[w]*cellInfoVecs.individualCellHeight[w]<<std::endl; // std::cout<<"distFromNucleus_normal_apical[w]*_individualCellHeight[w] : "<<cellInfoVecs.distFromNucleus_normal_apical[w]*cellInfoVecs.individualCellHeight[w]<<std::endl; // } } } // std::cout<< " *** 4.5 ***" << std::endl; int nucRepopuRate = 5000;//2000; // if (timeRatio > timeRatio_Crit_Division && relaxCount % nucRepopuRate == 0){ if (relaxCount % nucRepopuRate == 0){ // if (relaxCount == 100){ // std::cout<<"Nucleus are being repopuluated every "<<nucRepopuRate<<" time stpes"<<std::endl; // } uint targetCellRank; for (int i = 0; i < allocPara_m.currentActiveCellCount; i++){ if (cellInfoVecs.isEnteringMitotic[i] == false){ continue; } targetCellRank = i; if (targetCellRank < 2){ continue; } else if (targetCellRank > 62 && targetCellRank < 86){ continue; } else if (cellInfoVecs.activeIntnlNodeCounts[targetCellRank] >= allocPara_m.maxIntnlNodePerCell){ continue; } int startIndex = targetCellRank*allocPara_m.maxAllNodePerCell + allocPara_m.maxMembrNodePerCell; int currentNumOfNucleus = 0; for (int j = startIndex; j < (targetCellRank+1)*allocPara_m.maxAllNodePerCell; j++){ if (nodes->getInfoVecs().nodeIsActive[j] == true){ currentNumOfNucleus+=1; } } int numOfMissingNulceus = allocPara_m.maxIntnlNodePerCell - currentNumOfNucleus; // std::cout<<"Currently missing "<<numOfMissingNulceus<<" nucleus particle in cell "<<targetCellRank<<std::endl; double total_nucNodeLocX = 0.0; double total_nucNodeLocY = 0.0; for (int k = 0; k < currentNumOfNucleus; k++){ total_nucNodeLocX += nodes->getInfoVecs().nodeLocX[startIndex + k]; total_nucNodeLocY += nodes->getInfoVecs().nodeLocY[startIndex + k]; } total_nucNodeLocX = total_nucNodeLocX/currentNumOfNucleus; total_nucNodeLocY = total_nucNodeLocY/currentNumOfNucleus; nodes->getInfoVecs().nodeLocX[startIndex + currentNumOfNucleus] = total_nucNodeLocX; nodes->getInfoVecs().nodeLocY[startIndex + currentNumOfNucleus] = total_nucNodeLocY; nodes->getInfoVecs().nodeIsActive[startIndex + currentNumOfNucleus] = true; // if (numOfMissingNulceus != 0){ // for (int k = 0; k < numOfMissingNulceus; k++){ // nodes->getInfoVecs().nodeLocX[startIndex + currentNumOfNucleus + k] = // nodes->getInfoVecs().nodeLocX[startIndex + k] + 0.01; // nodes->getInfoVecs().nodeLocY[startIndex + currentNumOfNucleus + k] = // nodes->getInfoVecs().nodeLocY[startIndex + k] + 0.01; // nodes->getInfoVecs().nodeIsActive[startIndex + currentNumOfNucleus + k] = true; // } // } currentNumOfNucleus = 0; for (int j = startIndex; j < (targetCellRank+1)*allocPara_m.maxAllNodePerCell; j++){ if (nodes->getInfoVecs().nodeIsActive[j] == true){ currentNumOfNucleus+=1; } cellInfoVecs.activeIntnlNodeCounts[targetCellRank] = currentNumOfNucleus; } // std::cout<<"number of nucleus nodes in cell["<<targetCellRank<<"] = "<<currentNumOfNucleus<<" after repopulation"<<std::endl; // std::cout<<"CellRank = "<<targetCellRank<<"new activeIntnlNodeCounts = "<<cellInfoVecs.activeIntnlNodeCounts[targetCellRank]<<std::endl; } } // std::cout << " *** 4.75 ***" << endl; // computeApicalLoc(timeRatio, timeRatio_Crit_Division); //Ali // ////// // // std::cout 
<< " *** 5 ***" << endl; // computeBasalLoc(); //Ali // std::cout << " *** 6 ***" << endl; // for (int i = 0; i < allocPara_m.currentActiveCellCount; i++){ // cellInfoVecs.individualCellHeight[i] = sqrt((cellInfoVecs.apicalLocX[i] - cellInfoVecs.basalLocX[i])*(cellInfoVecs.apicalLocX[i] - cellInfoVecs.basalLocX[i]) + // (cellInfoVecs.apicalLocY[i] - cellInfoVecs.basalLocY[i])*(cellInfoVecs.apicalLocY[i] - cellInfoVecs.basalLocY[i])); // cellInfoVecs.individualCellHeight_springHeight[i] = cellInfoVecs.individualCellHeight[i]*0.3; // //Note: what is calculated here really? Well, we want to extend a distance away from the cell center (-ish) position to determine how far // // contractile spring will be placed. We now assume that the upper and lower 20% of the cell height is covered by contractile spring, // // therefore, leading to the non-spring portion taking 60% (hence 30% above center and 30% below center). // cellInfoVecs.distFromNucleus_normal[i] = -1.0*cellInfoVecs.individualCellHeight_springHeight[i]; // cellInfoVecs.distFromNucleus_normal_apical[i] = 1.0*cellInfoVecs.individualCellHeight_springHeight[i]; // } computeApicalLoc(timeRatio, timeRatio_Crit_Division); computeBasalLoc(); uint recalculateCellHeight = 4; if (timeRatio == 0){// || relaxCount%recalculateCellHeight == 0){ computeIndividualCellHeight(distFromNucleus_normalMax1,distFromNucleus_normalMax2,distFromNucleus_normalMax3, distFromNucleus_normalMax_apical1,distFromNucleus_normalMax_apical2,distFromNucleus_normalMax_apical3);//This function does the above commented out computation. // computeIndividualCellHeight_Ver2();//Ver2 does cell height calculation only, not manipulating the number of contractile springs. } else if (relaxCount%recalculateCellHeight == 0){ computeIndividualCellHeight_Ver2();//Ver2 does cell height calculation only, not manipulating the number of contractile springs. 
} #ifdef debugModeECM hipEventRecord(start3, 0); hipEventSynchronize(start3); hipEventElapsedTime(&elapsedTime2, start2, start3); #endif // eCMCellInteraction(cellPolar,subCellPolar,tmpIsInitPhase, timeRatio, timeRatio_Crit_ECM, timeRatio_Crit_Division, relaxCount); eCMCellInteraction(cellPolar,subCellPolar,tmpIsInitPhase, timeRatio, timeRatio_Crit_ECM, timeRatio_Crit_Division, relaxCount, mitoticThreshold); if (cellInfoVecs.isOneTimeStepPostDivision != false){ std::cout<<"Condition ' cellInfoVecs.isOneTimeStepPostDivision != false ' is satisfied"<<std::endl; std::cout<<"cellInfoVecs.isOneTimeStepPostDivision = "<<cellInfoVecs.isOneTimeStepPostDivision<<std::endl; std::cout<<"cellInfoVecs.isPostDivision = "<<cellInfoVecs.isPostDivision<<std::endl; /*for (int kk = 0; kk < cellInfoVecs.eCMNeighborId.size(); kk++){ std::cout<<"eCMNeighborId["<<kk<<"] after division= "<<cellInfoVecs.eCMNeighborId[kk]<<std::endl; }*/ } if (cellInfoVecs.isPostDivision == true){ cellInfoVecs.isOneTimeStepPostDivision = true; cellInfoVecs.isPostDivision = false; std::cout<<"Condition ' cellInfoVecs.isPostDivision == true ' is satisfied"<<std::endl; std::cout<<"cellInfoVecs.isOneTimeStepPostDivision = "<<cellInfoVecs.isOneTimeStepPostDivision<<std::endl; std::cout<<"cellInfoVecs.isPostDivision = "<<cellInfoVecs.isPostDivision<<std::endl; } else if (cellInfoVecs.isPostDivision == false && cellInfoVecs.isOneTimeStepPostDivision == true){ cellInfoVecs.isTwoTimeStepPostDivision = false;//true; cellInfoVecs.isOneTimeStepPostDivision = false; } else{ cellInfoVecs.isOneTimeStepPostDivision = false; cellInfoVecs.isTwoTimeStepPostDivision = false; } if (cellInfoVecs.isPostAddMembrNodes == true){ cellInfoVecs.isPostAddMembrNodes = false; } // if (timeRatio > timeRatio_Crit_Division && nodes->isECMPairPrinted==false){ // if (timeRatio == timeRatio_Crit_Division){ // // std::cout<<"bdrynodecount = "<<allocPara_m.bdryNodeCount<<std::endl; // // for (int kk = 0; kk < cellInfoVecs.basalLocX.size(); kk++){ // // // std::cout<<"BasalLocX["<<kk<<"] = "<< cellInfoVecs.basalLocX[kk]<<std::endl; // // // std::cout<<"BasalLocY["<<kk<<"] = "<< cellInfoVecs.basalLocY[kk]<<std::endl; // // } // for (int kk = 0; kk < (allocPara_m.currentActiveCellCount*allocPara_m.maxAllNodePerCell); kk++){ // if (kk/allocPara_m.maxAllNodePerCell ==86 || kk/allocPara_m.maxAllNodePerCell == 31 || kk/allocPara_m.maxAllNodePerCell == 32 ||kk/allocPara_m.maxAllNodePerCell == 30){ // } // else{ // continue; // } // } // for (int kk = 0; kk < cellInfoVecs.eCMNeighborId.size(); kk++){ // std::cout<<"eCMNeighborId["<<kk<<"] = "<<cellInfoVecs.eCMNeighborId[kk]<<std::endl; // } // nodes->isECMPairPrinted=true; // } // std::cout << " *** 7 ***" << endl; #ifdef debugModeECM hipEventRecord(start4, 0); hipEventSynchronize(start4); hipEventElapsedTime(&elapsedTime3, start3, start4); #endif computeCenterPos_M2(); //Ali // std::cout << " *** 8 ***" << endl; computeInternalAvgPos_M(); //Ali // right now internal points represent nucleus // std::cout << " *** 9 ***" << endl; //computeNucleusLoc() ; #ifdef debugModeECM hipEventRecord(start5, 0); hipEventSynchronize(start5); hipEventElapsedTime(&elapsedTime4, start4, start5); #endif if (isInitNucPercentCalculated==false && controlPara.resumeSimulation==0) { computeNucleusIniLocPercent(); //Ali writeNucleusIniLocPercent(); //Ali isInitNucPercentCalculated=true ; cout << " I computed initial location of nucleus positions in percent" << endl; } else if (isInitNucPercentCalculated==false && controlPara.resumeSimulation==1){ 
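// Resume mode: read back the nucleus location percentages written by an earlier run instead of recomputing them.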
readNucleusIniLocPercent(); //Ali isInitNucPercentCalculated=true ; for (int j = 0; j < cellInfoVecs.nucleusLocPercent.size(); j++){ std::cout<<"nucleusLocPercent["<<j<<"] = "<<cellInfoVecs.nucleusLocPercent[j]<<std::endl; } cout << " I read initial location of nucleus positions in percent, since I am in resume mode" << endl; } // std::cout << " *** 10 ***" << endl; // if (timeRatio == 0){ computeNucleusDesireLoc() ; // Ali // } // if (timeRatio == timeRatio_Crit_Division || timeRatio == timeRatio_Crit_Division+0.2){ // std::cout<<"timeRatio = "<<timeRatio<<std::endl; // std::cout<<"cellInfoVecs.nucDesireDistApical[31] = "<<cellInfoVecs.nucDesireDistApical[31]<<std::endl; // std::cout<<"cellInfoVecs.centerCoordX[31] = "<<cellInfoVecs.centerCoordX[31]<<std::endl; // std::cout<<"cellInfoVecs.centerCoordY[31] = "<<cellInfoVecs.centerCoordY[31]<<std::endl; // std::cout<<"cellInfoVecs.apicalLocX[31] = "<<cellInfoVecs.apicalLocX[31]<<std::endl; // std::cout<<"cellInfoVecs.apicalLocY[31] = "<<cellInfoVecs.apicalLocY[31]<<std::endl; // std::cout<<"cellInfoVecs.nucDesireDistApical[87] = "<<cellInfoVecs.nucDesireDistApical[87]<<std::endl; // std::cout<<"cellInfoVecs.centerCoordX[87] = "<<cellInfoVecs.centerCoordX[87]<<std::endl; // std::cout<<"cellInfoVecs.centerCoordY[87] = "<<cellInfoVecs.centerCoordY[87]<<std::endl; // std::cout<<"cellInfoVecs.apicalLocX[87] = "<<cellInfoVecs.apicalLocX[87]<<std::endl; // std::cout<<"cellInfoVecs.apicalLocY[87] = "<<cellInfoVecs.apicalLocY[87]<<std::endl; // } // std::cout << " *** 11 ***" << endl; #ifdef debugModeECM hipEventRecord(start6, 0); hipEventSynchronize(start6); hipEventElapsedTime(&elapsedTime5, start5, start6); #endif // if (tmpIsInitPhase==false) { // updateInternalAvgPosByNucleusLoc_M (); // } //PlotNucleus (lastPrintNucleus, outputFrameNucleus) ; //BC_Imp_M() ; //Ali applySceCellDisc_M(); // std::cout << " *** 12 ***" << endl; #ifdef debugModeECM hipEventRecord(start7, 0); hipEventSynchronize(start7); hipEventElapsedTime(&elapsedTime6, start6, start7); #endif if (isBasalActinPresent) { // cout << " I am applying basal contraction" << endl ; // applyMembContraction(timeRatio, timeRatio_Crit_actomyo, timeRatio_Crit_Division, distFromNucleus_max, distFromNucleus_min, percentage_before_timeRatio_Crit_Division_scaling) ; // Ali applyMembContraction2(timeRatio, timeRatio_Crit_actomyo, timeRatio_Crit_Division, distFromNucleus_max, distFromNucleus_min,mitoRndActomyoStrengthScaling);// percentage_before_timeRatio_Crit_Division_scaling) ; // Kevin } // std::cout << " *** 13 ***" << endl; #ifdef debugModeECM hipEventRecord(start8, 0); hipEventSynchronize(start8); hipEventElapsedTime(&elapsedTime7, start7, start8); #endif // applyNucleusEffect() ; // applyForceInteractionNucleusAsPoint() ; applyMemForce_M(cellPolar,subCellPolar); // if (timeRatio > timeRatio_Crit_Division && nodes->isActinLevelDisplayed == false){ // for (int w = 0; w < nodes->getInfoVecs().nodeActinLevel.size(); w++){ // if (w/allocPara_m.maxAllNodePerCell == 31 || w/allocPara_m.maxAllNodePerCell == 86){ // std::cout<<"actin level["<<w/allocPara_m.maxAllNodePerCell<<"] = "<<nodes->getInfoVecs().nodeActinLevel[w]<<std::endl; // } // } // nodes->isActinLevelDisplayed = true; // } // std::cout << " *** 14 ***" << endl; #ifdef debugModeECM hipEventRecord(start9, 0); hipEventSynchronize(start9); hipEventElapsedTime(&elapsedTime8, start8, start9); #endif applyVolumeConstraint(timeRatio, timeRatio_Crit_Division, volume_Increase_Target_Ratio, volume_Increase_Scale, postDivision_restorationRateScale, 
cycle); //Ali // std::cout << " *** 15 ***" << endl; if (timeRatio > timeRatio_Crit_Division && nodes->isActinLevelDisplayed == false){ for (int kk = 0; kk < cellInfoVecs.cellAreaVec.size(); kk++){ // if (kk == 31 || kk == 86){ // std::cout<<"Cell["<<kk<<"] area = "<<cellInfoVecs.cellAreaVec[kk]<<std::endl; // } // for (int kk = 0; kk < allocPara_m.maxCellCount; kk++){ // double cellRank = kk; // uint intnlIndxMemBegin = cellRank * allocPara_m.maxAllNodePerCell; // uint intnlIndxBegin = cellRank * allocPara_m.maxAllNodePerCell + allocPara_m.maxMembrNodePerCell; // uint intnlIndxEnd = intnlIndxBegin + cellInfoVecs.activeIntnlNodeCounts[kk]; // std::cout<<"cellRank = "<<cellRank<<", intnlIndxMemBegin = "<<intnlIndxMemBegin<<", intnlIndxBegin = "<<intnlIndxBegin<<", intnlIndxEnd = "<<intnlIndxEnd<<std::endl; // } } // for (int kk = 0; kk < cellInfoVecs.Cell_Damp.size(); kk++){ // std::cout<<"Node Damping for cell ["<<kk<<"] = "<<cellInfoVecs.Cell_Damp[kk]<<std::endl; // } nodes->isActinLevelDisplayed=true; } #ifdef debugModeECM hipEventRecord(start10, 0); hipEventSynchronize(start10); hipEventElapsedTime(&elapsedTime9, start9, start10); #endif //ApplyExtForces() ; // now for single cell stretching //computeContractileRingForces() ; // computeCenterPos_M(); //Ali cmment // // if (isCellGrowSet==false) { // growAtRandom_M(dt); // growAtRandom_M(growthProgressSpeed); growAtRandom_M_Ver2(growthProgressSpeed, mitoticThreshold); // cout << "I set the growth level. Since the cells are not growing a divising for this simulation I won't go inside this function any more" << endl ; // isCellGrowSet=true ; // } // std::cout << " *** 16 ***" << endl; relaxCount=relaxCount+1 ; // std::cout << " *** 17 ***" << endl; distributeCellGrowthProgress_M(); // std::cout << " *** 18 ***" << endl; findTangentAndNormal_M();//AAMIRI ADDED May29 // std::cout << " *** 19 ***" << endl; StoreNodeOldPositions() ; // std::cout << " *** 20 ***" << endl; #ifdef debugModeECM hipEventRecord(start11, 0); hipEventSynchronize(start11); hipEventElapsedTime(&elapsedTime10, start10, start11); #endif allComponentsMove_M(); // std::cout << " *** 21 ***" << endl; int checkForMitosisAndDivision = 250; int checkForOverextension = 500; bool membrAddingNode = false; // if (1 < 0){ if (relaxCount % checkForMitosisAndDivision == 0){ if (allocPara_m.currentActiveCellCount < allocPara_m.maxCellCount){ // std::cout<<"contractileSpringGrowthProgress[10] = "<<cellInfoVecs.contractileSpringGrowthProgress[10]<<std::endl; enterMitoticCheckForDivAxisCal(mitoticThreshold) ; divide2D_M(volume_Increase_Target_Ratio, timeRatio, thresholdToIntroduceNewCell); // std::cout<<" *** 4.25 *** "<< std::endl; if (relaxCount % checkForOverextension == 0){ updateMembrGrowthProgress_M(); handleMembrGrowth_M(maxApicalBasalNodeNum, maxLengthToAddMemNodes); } nodes->adhUpdate=true; } } #ifdef debugModeECM hipEventRecord(start12, 0); hipEventSynchronize(start12); hipEventElapsedTime(&elapsedTime11, start11, start12); #endif //allComponentsMoveImplicitPart() ; // if (relaxCount%checkForMitosisAndDivision==0) { // updateMembrGrowthProgress_M(); // handleMembrGrowth_M(); // // std::cout << " *** 10 ***" << endl; // std::cout.flush(); // // relaxCount=0 ; // Ali // nodes->adhUpdate=true; // Ali // } # ifdef debugModeECM hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime12, start12, stop); std::cout << "time 1 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ; std::cout << "time 2 spent in 
cell for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ; std::cout << "time 3 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ; std::cout << "time 4 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime4 << endl ; std::cout << "time 5 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime5 << endl ; std::cout << "time 6 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime6 << endl ; std::cout << "time 7 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime7 << endl ; std::cout << "time 8 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime8 << endl ; std::cout << "time 9 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime9 << endl ; std::cout << "time 10 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime10 << endl ; std::cout << "time 11 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime11 << endl ; std::cout << "time 12 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime12 << endl ; #endif // std::cout << " *** 22 ***" << endl; } void SceCells::runStretchTest(double dt) { this->dt = dt; computeCenterPos(); growAlongX(false, dt); moveNodes(); } void SceCells::growAlongX(bool isAddPt, double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; setGrowthDirXAxis(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); if (isAddPt) { addPointIfScheduledToGrow(); } } void SceCells::growWithStress(double d_t) { } std::vector<CVector> SceCells::getAllCellCenters() { thrust::host_vector<double> centerX = cellInfoVecs.centerCoordX; thrust::host_vector<double> centerY = cellInfoVecs.centerCoordY; thrust::host_vector<double> centerZ = cellInfoVecs.centerCoordZ; std::vector<CVector> result; for (uint i = 0; i < allocPara.currentActiveCellCount; i++) { CVector pos = CVector(centerX[i], centerY[i], centerZ[i]); result.push_back(pos); } return result; } void SceCells::setGrowthDirXAxis() { thrust::fill(cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthXDir.begin() + allocPara.currentActiveCellCount, 1.0); thrust::fill(cellInfoVecs.growthYDir.begin(), cellInfoVecs.growthYDir.begin() + allocPara.currentActiveCellCount, 0.0); thrust::fill(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, growthAuxData.fixedGrowthSpeed); } std::vector<double> SceCells::getGrowthProgressVec() { thrust::host_vector<double> growthProVec = cellInfoVecs.growthProgress; std::vector<double> result; for (uint i = 0; i < allocPara.currentActiveCellCount; i++) { result.push_back(growthProVec[i]); } return result; } void 
SceCells::copyCellsPreDivision_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara_m.maxAllNodePerCell; divAuxData.tmpApicalLoc = thrust::device_vector<double>(2,0.0); divAuxData.tmpBasalLoc = thrust::device_vector<double>(2,0.0); divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( divAuxData.nodeStorageCount, notAssigned1); //Ali divAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( divAuxData.nodeStorageCount, -1); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpIntAvgX_M = thrust::device_vector<double>( //Ali divAuxData.toBeDivideCount, 0); divAuxData.tmpIntAvgY_M = thrust::device_vector<double>( //Ali divAuxData.toBeDivideCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodeType1 = thrust::device_vector<MembraneType1>( divAuxData.nodeStorageCount, notAssigned1); //Ali divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodeType2 = thrust::device_vector<MembraneType1>( divAuxData.nodeStorageCount, notAssigned1); //Ali //A&A divAuxData.tmpHertwigXdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpHertwigYdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); //A&A // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, 
nodes->getInfoVecs().nodeMemMirrorIndex.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin(), divAuxData.tmpNodeType.begin(), divAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpHertwigXdir.begin(), divAuxData.tmpHertwigYdir.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin(), divAuxData.tmpIntAvgX_M.begin(), divAuxData.tmpIntAvgY_M.begin() )), isTrue()); for (int w = 0; w < cellInfoVecs.isDividing.size(); w++){ if (cellInfoVecs.isDividing[w] == true){ divAuxData.tmpApicalLoc[0] = cellInfoVecs.apicalLocX[w]; divAuxData.tmpApicalLoc[1] = cellInfoVecs.apicalLocY[w]; divAuxData.tmpBasalLoc[0] = cellInfoVecs.basalLocX[w]; divAuxData.tmpBasalLoc[1] = cellInfoVecs.basalLocY[w]; } } } void SceCells::copyCellsForPrintingOnly_M() { uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; printAuxData.nodeStorageCount = printAuxData.toBeDivideCount * allocPara_m.maxAllNodePerCell; // std::cout<<"copy 1"<<std::endl; printAuxData.tmpApicalLoc = thrust::device_vector<double>(2,0.0); printAuxData.tmpBasalLoc = thrust::device_vector<double>(2,0.0); // std::cout<<"copy 2"<<std::endl; printAuxData.tmpIsActive_M = thrust::device_vector<bool>( printAuxData.nodeStorageCount, true); printAuxData.tmpNodePosX_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpNodePosY_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( printAuxData.nodeStorageCount, notAssigned1); //Ali printAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( printAuxData.nodeStorageCount, -1); // std::cout<<"copy 3"<<std::endl; printAuxData.tmpCellRank_M = thrust::device_vector<uint>( printAuxData.toBeDivideCount, 0); printAuxData.tmpDivDirX_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpDivDirY_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpCenterPosX_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpCenterPosY_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpNucleusCenterPosX_M 
= thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); // std::cout<<"copy 4"<<std::endl; printAuxData.tmpIntAvgX_M = thrust::device_vector<double>( //Ali printAuxData.toBeDivideCount, 0); printAuxData.tmpIntAvgY_M = thrust::device_vector<double>( //Ali printAuxData.toBeDivideCount, 0); // std::cout<<"copy 5"<<std::endl; printAuxData.tmpIsActive1_M = thrust::device_vector<bool>( printAuxData.nodeStorageCount, false); printAuxData.tmpXPos1_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpYPos1_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpNodeType1 = thrust::device_vector<MembraneType1>( printAuxData.nodeStorageCount, notAssigned1); //Ali // std::cout<<"copy 6"<<std::endl; printAuxData.tmpIsActive2_M = thrust::device_vector<bool>( printAuxData.nodeStorageCount, false); printAuxData.tmpXPos2_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpYPos2_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpNodeType2 = thrust::device_vector<MembraneType1>( printAuxData.nodeStorageCount, notAssigned1); //Ali // std::cout<<"copy 7"<<std::endl; //A&A printAuxData.tmpHertwigXdir = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpHertwigYdir = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); //A&A // std::cout<<"copy 8"<<std::endl; // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin())), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(printAuxData.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(printAuxData.tmpIsActive_M.begin(), printAuxData.tmpNodePosX_M.begin(), printAuxData.tmpNodePosY_M.begin(), printAuxData.tmpNodeType.begin(), printAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // std::cout<<"copy 9"<<std::endl; // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), 
cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple(printAuxData.tmpCellRank_M.begin(), printAuxData.tmpDivDirX_M.begin(), printAuxData.tmpDivDirY_M.begin(), printAuxData.tmpHertwigXdir.begin(), printAuxData.tmpHertwigYdir.begin(), printAuxData.tmpCenterPosX_M.begin(), printAuxData.tmpCenterPosY_M.begin(), printAuxData.tmpIntAvgX_M.begin(), printAuxData.tmpIntAvgY_M.begin() )), isTrue()); // std::cout<<"copy 10"<<std::endl; // for (int w = 0; w < printAuxData.isDividing.size(); w++){ // if (printAuxData.isDividing[w] == true){ // printAuxData.tmpApicalLoc[0] = cellInfoVecs.apicalLocX[w]; // printAuxData.tmpApicalLoc[1] = cellInfoVecs.apicalLocY[w]; // printAuxData.tmpBasalLoc[0] = cellInfoVecs.basalLocX[w]; // printAuxData.tmpBasalLoc[1] = cellInfoVecs.basalLocY[w]; // } // } } // void SceCells::copyCellsEnterMitotic() { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // divAuxData.nodeStorageCount = divAuxData.toEnterMitoticCount // * allocPara_m.maxAllNodePerCell; // std::cout<<"nodeStorageCount = "<<divAuxData.nodeStorageCount<<std::endl; // divAuxData.tmpIsActive_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, true); // divAuxData.tmpNodePosX_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpNodePosY_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( // divAuxData.nodeStorageCount, notAssigned1); //Ali // divAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( // divAuxData.nodeStorageCount, -1); // divAuxData.tmpCellRank_M = thrust::device_vector<uint>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpDivDirX_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpDivDirY_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, false); // divAuxData.tmpXPos1_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpYPos1_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, false); // divAuxData.tmpXPos2_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpYPos2_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // // step 2 , continued // copy node info values ready for division /comment A&A // thrust::counting_iterator<uint> iStart(0); // // thrust::copy_if( // // thrust::make_zip_iterator( // // thrust::make_tuple( // // nodes->getInfoVecs().nodeIsActive.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocX.begin() // // 
+ allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocY.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().memNodeType1.begin() // // + allocPara_m.bdryNodeCount)), // // thrust::make_zip_iterator( // // thrust::make_tuple( // // nodes->getInfoVecs().nodeIsActive.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocX.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocY.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().memNodeType1.begin() // // + allocPara_m.bdryNodeCount)) // // + totalNodeCountForActiveCells, // // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // // make_transform_iterator(iStart, // // DivideFunctor(allocPara_m.maxAllNodePerCell))), // // thrust::make_zip_iterator( // // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // // divAuxData.tmpNodePosX_M.begin(), // // divAuxData.tmpNodePosY_M.begin(), // // divAuxData.tmpNodeType.begin())), isTrue()); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeMemMirrorIndex.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeMemMirrorIndex.begin() // + allocPara_m.bdryNodeCount)) // + totalNodeCountForActiveCells, // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // make_transform_iterator(iStart, // DivideFunctor(allocPara_m.maxAllNodePerCell))), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // divAuxData.tmpNodePosX_M.begin(), // divAuxData.tmpNodePosY_M.begin(), // divAuxData.tmpNodeType.begin(), // divAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // // step 3 , continued for cell properties //copy cell info values ready for division /comment A&A // thrust::counting_iterator<uint> iBegin(0); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), // cellInfoVecs.growthYDir.begin(), // cellInfoVecs.centerCoordX.begin(), // cellInfoVecs.centerCoordY.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), // cellInfoVecs.growthYDir.begin(), // cellInfoVecs.centerCoordX.begin(), // cellInfoVecs.centerCoordY.begin())) // + allocPara_m.currentActiveCellCount, // cellInfoVecs.isEnteringMitotic.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), // divAuxData.tmpDivDirX_M.begin(), // divAuxData.tmpDivDirY_M.begin(), // divAuxData.tmpCenterPosX_M.begin(), // divAuxData.tmpCenterPosY_M.begin())), isTrue()); // } // void SceCells::copyCellsEnterDivision() { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // divAuxData.nodeStorageCount = divAuxData.toBeDivideCount // * allocPara_m.maxAllNodePerCell; // std::cout<<"nodeStorageCount = 
"<<divAuxData.nodeStorageCount<<std::endl; // divAuxData.tmpIsActive_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, true); // divAuxData.tmpNodePosX_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpNodePosY_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( // divAuxData.nodeStorageCount, notAssigned1); //Ali // divAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( // divAuxData.nodeStorageCount, -1); // divAuxData.tmpCellRank_M = thrust::device_vector<uint>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpDivDirX_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpDivDirY_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, false); // divAuxData.tmpXPos1_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpYPos1_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, false); // divAuxData.tmpXPos2_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpYPos2_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // // step 2 , continued // copy node info values ready for division /comment A&A // thrust::counting_iterator<uint> iStart(0); // // thrust::copy_if( // // thrust::make_zip_iterator( // // thrust::make_tuple( // // nodes->getInfoVecs().nodeIsActive.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocX.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocY.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().memNodeType1.begin() // // + allocPara_m.bdryNodeCount)), // // thrust::make_zip_iterator( // // thrust::make_tuple( // // nodes->getInfoVecs().nodeIsActive.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocX.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocY.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().memNodeType1.begin() // // + allocPara_m.bdryNodeCount)) // // + totalNodeCountForActiveCells, // // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // // make_transform_iterator(iStart, // // DivideFunctor(allocPara_m.maxAllNodePerCell))), // // thrust::make_zip_iterator( // // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // // divAuxData.tmpNodePosX_M.begin(), // // divAuxData.tmpNodePosY_M.begin(), // // divAuxData.tmpNodeType.begin())), isTrue()); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + 
allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeMemMirrorIndex.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeMemMirrorIndex.begin() // + allocPara_m.bdryNodeCount)) // + totalNodeCountForActiveCells, // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // make_transform_iterator(iStart, // DivideFunctor(allocPara_m.maxAllNodePerCell))), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // divAuxData.tmpNodePosX_M.begin(), // divAuxData.tmpNodePosY_M.begin(), // divAuxData.tmpNodeType.begin(), // divAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // // step 3 , continued for cell properties //copy cell info values ready for division /comment A&A // thrust::counting_iterator<uint> iBegin(0); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), // cellInfoVecs.growthYDir.begin(), // cellInfoVecs.centerCoordX.begin(), // cellInfoVecs.centerCoordY.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), // cellInfoVecs.growthYDir.begin(), // cellInfoVecs.centerCoordX.begin(), // cellInfoVecs.centerCoordY.begin())) // + allocPara_m.currentActiveCellCount, // cellInfoVecs.isEnteringMitotic.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), // divAuxData.tmpDivDirX_M.begin(), // divAuxData.tmpDivDirY_M.begin(), // divAuxData.tmpCenterPosX_M.begin(), // divAuxData.tmpCenterPosY_M.begin())), isTrue()); // } void SceCells::copyCellsEnterDivision() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara_m.maxAllNodePerCell; std::cout<<"nodeStorageCount = "<<divAuxData.nodeStorageCount<<std::endl; divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( divAuxData.nodeStorageCount, notAssigned1); //Ali divAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( divAuxData.nodeStorageCount, -1); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = 
thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount)), // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount)) // + totalNodeCountForActiveCells, // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // make_transform_iterator(iStart, // DivideFunctor(allocPara_m.maxAllNodePerCell))), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // divAuxData.tmpNodePosX_M.begin(), // divAuxData.tmpNodePosY_M.begin(), // divAuxData.tmpNodeType.begin())), isTrue()); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin())), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin(), divAuxData.tmpNodeType.begin(), divAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // step 3 , continued for cell properties //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), 
divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin())), isTrue()); } void SceCells::createTwoNewCellArr_M() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); divAuxData.isMotherCellBehind.clear(); //Ali //divDebug(); for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { divAuxData.tmp1IntnlVec.clear(); divAuxData.tmp2IntnlVec.clear(); vector<CVector> membrNodes; vector<CVector> intnlNodes; vector<MembraneType1> nodeTypeIndxDiv ; vector<CVector> cellCenterLine_Basal2Apical; vector<CVector> cellCenterLine_Apical2Basal; vector<CVector> cellCenterLine_Basal2Apical_leftShift; vector<CVector> cellCenterLine_Basal2Apical_rightShift; vector<CVector> cellCenterLine_Apical2Basal_leftShift; vector<CVector> cellCenterLine_Apical2Basal_rightShift; vector<double> cellCenterLine_MirrorLength_Basal2Apical; vector<double> cellCenterLine_MirrorLength_Apical2Basal; //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // std::cout<<"HAHA ERROR 1"<<std::endl; // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes,nodeTypeIndxDiv); // Ali obtainMembrAndIntnlNodesPlusNodeType2(i, membrNodes, intnlNodes, nodeTypeIndxDiv, cellCenterLine_Basal2Apical, cellCenterLine_Apical2Basal, cellCenterLine_Basal2Apical_leftShift, cellCenterLine_Basal2Apical_rightShift, cellCenterLine_Apical2Basal_leftShift, cellCenterLine_Apical2Basal_rightShift, cellCenterLine_MirrorLength_Basal2Apical, cellCenterLine_MirrorLength_Apical2Basal); // std::cout<<"HAHA ERROR 2"<<std::endl; CVector oldCellCenter = obtainCellCenter(i); // std::cout<<"oldCellCenter = "<<oldCellCenter.x<<" "<<oldCellCenter.y<<std::endl; // std::cout<<"HAHA ERROR 3"<<std::endl; CVector oldNucleusCenter = obtainNucleusCenter(i, intnlNodes);// Kevin // std::cout<<"HAHA ERROR 4"<<std::endl; // CVector oldIntCenter = obtainIntCenter(i); //A&A commented //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); /*CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis);*/ CVector divDir, true_divDir; // divDir.x = divAuxData.tmpHertwigXdir[i] ; //A&A // divDir.y = divAuxData.tmpHertwigYdir[i] ; //A&A divDir.x = divAuxData.tmpBasalLoc[0] - oldNucleusCenter.x; divDir.y = divAuxData.tmpBasalLoc[1] - oldNucleusCenter.y; true_divDir = divDir.rotateNegativeNintyDeg_XY_CC(); // std::cout<<"true_divDir = "<<true_divDir.x<<" "<<true_divDir.y<<std::endl; // std::cout<<"HAHA ERROR 5"<<std::endl; // double lenAlongHertwigAxis = calLengthAlongHertwigAxis(divDir, oldCellCenter, membrNodes);//A&A added double lenAlongHertwigAxis = calLengthAlongHertwigAxis(true_divDir, oldNucleusCenter, membrNodes); // std::cout<<"HAHA ERROR 6"<<std::endl; std::vector<VecValT> tmp1Membr, tmp2Membr; CVector intCell1Center, intCell2Center; // obtain the center of two cell along the shortest distance between the membrane nodes of mother cell. There is also a tuning factor to shift the centers inside the cell "shiftRatio" // obtainTwoNewIntCenters(oldIntCenter, divDir, lenAlongHertwigAxis, intCell1Center, // intCell2Center); obtainTwoNewIntCenters(oldNucleusCenter, true_divDir, lenAlongHertwigAxis, intCell1Center, intCell2Center); // decide each membrane nodes and internal nodes of mother cell is going to belongs to daugther cell 1 or 2. 
Also shrink the internal nod position along the aixs connecting mother cell to the internal nodes by a factor given as an input in the name of "Shrink ratio" // prepareTmpVec(i, divDir, oldCellCenter, oldIntCenter,tmp1Membr, tmp2Membr); prepareTmpVec(i, divDir, oldNucleusCenter, oldNucleusCenter,tmp1Membr, tmp2Membr); // for (int n = 0; n < tmp1Membr.size(); n++){ // // std::cout<<"tmp1Membr["<<n<<"].val = "<<tmp1Membr[n].val<<", .vec = "<<tmp1Membr[n].vec.x<<" "<<tmp1Membr[n].vec.y<<" "<<tmp1Membr[n].vec.z<<", type = "<<tmp1Membr[n].type<<std::endl; // } // for (int m = 0; m < tmp2Membr.size(); m++){ // // std::cout<<"tmp2Membr["<<m<<"].val = "<<tmp2Membr[m].val<<", .vec = "<<tmp2Membr[m].vec.x<<" "<<tmp2Membr[m].vec.y<<" "<<tmp2Membr[m].vec.z<<", type = "<<tmp2Membr[m].type<<std::endl; // } //create the two new membrane line based on the specified distance. // processMemVec(i, tmp1Membr, tmp2Membr, oldNucleusCenter); processMemVec_Ver2(i, tmp1Membr, tmp2Membr, oldNucleusCenter, cellCenterLine_Basal2Apical, cellCenterLine_Apical2Basal, cellCenterLine_Basal2Apical_leftShift, cellCenterLine_Basal2Apical_rightShift, cellCenterLine_Apical2Basal_leftShift, cellCenterLine_Apical2Basal_rightShift, cellCenterLine_MirrorLength_Basal2Apical,cellCenterLine_MirrorLength_Apical2Basal ); // shift the internal to make sure the center of new daugther cell is exactly similar to what have chosen in the function "obtainTwoNewCenters" shiftIntnlNodesByCellCenter(intCell1Center, intCell2Center); // assemble two new daughter cells information. assembleVecForTwoCells(i); } //divDebug(); } void SceCells::printCellCenterLine_M(int p) { printAuxData.tmp1MemActiveCounts.clear(); printAuxData.tmp1InternalActiveCounts.clear(); printAuxData.tmp2MemActiveCounts.clear(); printAuxData.tmp2InternalActiveCounts.clear(); printAuxData.isMotherCellBehind.clear(); for (uint i = 0; i < printAuxData.toBeDivideCount; i++) { printAuxData.tmp1IntnlVec.clear(); printAuxData.tmp2IntnlVec.clear(); vector<CVector> membrNodes; vector<CVector> intnlNodes; vector<MembraneType1> nodeTypeIndxDiv ; vector<CVector> cellCenterLine_Basal2Apical; vector<CVector> cellCenterLine_Apical2Basal; //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // std::cout<<"HAHA ERROR 1"<<std::endl; // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes,nodeTypeIndxDiv); // Ali obtainMembrAndIntnlNodesPlusNodeType2_printingOnly(i, membrNodes, intnlNodes, nodeTypeIndxDiv, cellCenterLine_Basal2Apical, cellCenterLine_Apical2Basal); for (int j = 0; j < cellCenterLine_Apical2Basal.size(); j++){ std::cout<<"cellRank = "<<p<<" "<<cellCenterLine_Apical2Basal[j].x<<" "<<cellCenterLine_Apical2Basal[j].y<<std::endl; } } } //A&A // void SceCells::findHertwigAxis() { // divAuxData.tmp1MemActiveCounts.clear(); // divAuxData.tmp1InternalActiveCounts.clear(); // divAuxData.tmp2MemActiveCounts.clear(); // divAuxData.tmp2InternalActiveCounts.clear(); // //divDebug(); // for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { // uint cellRank = divAuxData.tmpCellRank_M[i]; // vector<CVector> membrNodes; // vector<CVector> intnlNodes; // vector<MembraneType1> nodeTypeIndxDiv ; // std::pair <int ,int > ringIds ; // //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes,nodeTypeIndxDiv); // Ali // CVector oldCellCenter = obtainCellCenter(i);// cell center // double lenAlongMajorAxis; // //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); // //CVector divDir = 
calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); //Ali // CVector divDir = calDivDir_ApicalBasal(oldCellCenter, membrNodes, // lenAlongMajorAxis,nodeTypeIndxDiv); //Ali // cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; // cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; // ringIds =calApicalBasalRingIds(divDir, oldCellCenter, membrNodes,nodeTypeIndxDiv); //Ali // // it is local membrane id ; // cellInfoVecs.ringApicalId[cellRank]=ringIds.first ; // cellInfoVecs.ringBasalId [cellRank]=ringIds.second ; // std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; // std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; // std::cout<<divDir.x<<"HertwigXdir " <<std::endl; // std::cout<<divDir.y<<"HertwigYdir " <<std::endl; // } // //divDebug(); // }// This is the original code that find HertwigAxis based on cell center //This is the new code that find HertwigAxis based on nucleus center // void SceCells::findHertwigAxis() { // divAuxData.tmp1MemActiveCounts.clear(); // divAuxData.tmp1InternalActiveCounts.clear(); // divAuxData.tmp2MemActiveCounts.clear(); // divAuxData.tmp2InternalActiveCounts.clear(); // //divDebug(); // for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { // uint cellRank = divAuxData.tmpCellRank_M[i]; // vector<CVector> membrNodes; // vector<CVector> intnlNodes; // vector<MembraneType1> nodeTypeIndxDiv ; // std::pair <int ,int > ringIds ; // //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes, nodeTypeIndxDiv); // // CVector oldCellCenter = obtainCellCenter(i);// cell center // CVector oldCellCenter = obtainNucleusCenter(i, intnlNodes);// Kevin // double lenAlongMajorAxis; // //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); // //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); //Ali // CVector divDir = calDivDir_ApicalBasal(oldCellCenter, membrNodes, // lenAlongMajorAxis,nodeTypeIndxDiv); //Ali // cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; // cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; // ringIds =calApicalBasalRingIds(divDir, oldCellCenter, membrNodes,nodeTypeIndxDiv); //Ali // // it is local membrane id ; // cellInfoVecs.ringApicalId[cellRank]=ringIds.first ; // cellInfoVecs.ringBasalId [cellRank]=ringIds.second ; // std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; // std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; // std::cout<<divDir.x<<"HertwigXdir " <<std::endl; // std::cout<<divDir.y<<"HertwigYdir " <<std::endl; // } // //divDebug(); // } // void SceCells::findHertwigAxis_useBasalApicalLoc() { // divAuxData.tmp1MemActiveCounts.clear(); // divAuxData.tmp1InternalActiveCounts.clear(); // divAuxData.tmp2MemActiveCounts.clear(); // divAuxData.tmp2InternalActiveCounts.clear(); // //divDebug(); // for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { // uint cellRank = divAuxData.tmpCellRank_M[i]; // vector<CVector> membrNodes; // vector<CVector> intnlNodes; // vector<MembraneType1> nodeTypeIndxDiv ; // std::pair <int ,int > ringIds ; // //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes, nodeTypeIndxDiv); // // CVector oldCellCenter = obtainCellCenter(i);// cell center // CVector oldCellCenter = obtainNucleusCenter(i, intnlNodes);// Kevin // double lenAlongMajorAxis; // //CVector divDir = 
calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); // //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); //Ali // // CVector divDir = calDivDir_ApicalBasal(oldCellCenter, membrNodes, // // lenAlongMajorAxis,nodeTypeIndxDiv); //Ali // CVector divDir; // CVector tmpDivDir; // tmpDivDir.x = cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank]; // tmpDivDir.y = cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank]; // double length = sqrt((cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank])*(cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank]) + // (cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank])*(cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank])); // tmpDivDir.x = tmpDivDir.x/length; // tmpDivDir.y = tmpDivDir.y/length; // divDir = tmpDivDir.rotateNintyDeg_XY_CC(); // cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; // cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; // ringIds =calApicalBasalRingIds(divDir, oldCellCenter, membrNodes,nodeTypeIndxDiv); //Ali // // it is local membrane id ; // cellInfoVecs.ringApicalId[cellRank]=ringIds.first ; // cellInfoVecs.ringBasalId [cellRank]=ringIds.second ; // std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; // std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; // std::cout<<divDir.x<<"HertwigXdir " <<std::endl; // std::cout<<divDir.y<<"HertwigYdir " <<std::endl; // } // //divDebug(); // } void SceCells::findHertwigAxis_useBasalApicalLoc() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); uint isDividingCount = 0; //divDebug(); for (int i = 0; i < cellInfoVecs.isDividing.size(); i++){ if (cellInfoVecs.isDividing[i] == true){ isDividingCount += 1; } } if (isDividingCount > 1){ std::cout<<"More than one cell undergoing division! 
Instability very likely to occur!"<<std::endl; } for (uint i = 0; i < isDividingCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; vector<CVector> membrNodes; vector<CVector> intnlNodes; vector<MembraneType1> nodeTypeIndxDiv ; std::pair <int ,int > ringIds ; //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes, nodeTypeIndxDiv); // CVector oldCellCenter = obtainCellCenter(i);// cell center CVector oldCellCenter = obtainNucleusCenter(i, intnlNodes);// Kevin double lenAlongMajorAxis; //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); //Ali // CVector divDir = calDivDir_ApicalBasal(oldCellCenter, membrNodes, // lenAlongMajorAxis,nodeTypeIndxDiv); //Ali CVector divDir; CVector tmpDivDir; tmpDivDir.x = cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank]; tmpDivDir.y = cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank]; double length = sqrt((cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank])*(cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank]) + (cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank])*(cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank])); tmpDivDir.x = tmpDivDir.x/length; tmpDivDir.y = tmpDivDir.y/length; divDir = tmpDivDir.rotateNintyDeg_XY_CC(); cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; // ringIds =calApicalBasalRingIds(divDir, oldCellCenter, membrNodes,nodeTypeIndxDiv); //Ali // // it is local membrane id ; // cellInfoVecs.ringApicalId[cellRank]=ringIds.first ; // cellInfoVecs.ringBasalId [cellRank]=ringIds.second ; std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; std::cout<<divDir.x<<"HertwigXdir " <<std::endl; std::cout<<divDir.y<<"HertwigYdir " <<std::endl; } //divDebug(); } void SceCells::copyFirstCellArr_M(double quiescence1, double quiescence1_half) { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; //Ali to preserve the neighbor information of each cell for the copySecondCellArr_M function; if two neighboring cells divide at exactly the same time step and the order // of mother and daughter cells is opposite, the methodology won't work. I think this situation will almost never happen. 
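// Descriptive note (added, hedged): copyFirstCellArr_M appears to overwrite the mother cell's node slots with the first
// daughter's data (positions, active flags, membrane node types) via thrust::copy over zip iterators, reset the mother
// slot's growth/area/mitotic bookkeeping, and rewire the nodeCellRankFront/Behind links (using the *Old copies made
// immediately below) so that the second daughter, appended after the currently active cells, ends up stitched into the
// cell chain on the correct side of the mother.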
// cout<<"nodeCellRankFront size = "<<nodes->getInfoVecs().nodeCellRankFront.size()<<std::endl; // cout<<"nodeCellRankFrontOld size = "<<nodes->getInfoVecs().nodeCellRankFrontOld.size()<<std::endl; // cout<<"nodeCellRankBehind size = "<<nodes->getInfoVecs().nodeCellRankBehind.size()<<std::endl; // cout<<"nodeCellRankBehindOld size = "<<nodes->getInfoVecs().nodeCellRankBehindOld.size()<<std::endl; thrust::copy (nodes->getInfoVecs().nodeCellRankFront.begin(),nodes->getInfoVecs().nodeCellRankFront.begin()+allocPara_m.currentActiveCellCount, nodes->getInfoVecs().nodeCellRankFrontOld.begin()) ; thrust::copy (nodes->getInfoVecs().nodeCellRankBehind.begin(),nodes->getInfoVecs().nodeCellRankBehind.begin()+allocPara_m.currentActiveCellCount, nodes->getInfoVecs().nodeCellRankBehindOld.begin()) ; cout << "Number of cells ready to divide in this time step is " <<divAuxData.toBeDivideCount << endl ; if (divAuxData.toBeDivideCount>1) { cout << "Warnining: at Least two cells divided at the same time step chance of error in finding next neighbor of each cell"<< endl ; } for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; std::cout<<"cellRank undergoing division = "<<cellRank<<std::endl; uint cellRankDaughter = allocPara_m.currentActiveCellCount + i; //Ali //WE WILL UPDATE THE NUCLEUSLOCPERCENTAGE HERE! cellInfoVecs.nucleusLocPercent[cellRankDaughter] = cellInfoVecs.nucleusLocPercent[cellRank]; std::cout<<"nucleusLocPercent Mother["<<cellRank<<"] = "<<cellInfoVecs.nucleusLocPercent[cellRank]<<std::endl; std::cout<<"nucleusLocPercent Daughter["<<cellRankDaughter<<"] = "<<cellInfoVecs.nucleusLocPercent[cellRankDaughter]<<std::endl; // std::cout<<"cellRankDaughter = "<<cellRankDaughter<<std::endl; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; // std::cout<<"nodeStartIndx = "<<nodeStartIndx<<std::endl; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; // std::cout<<"tmpStartIndx = "<<tmpStartIndx<<std::endl; // std::cout<<"tmpEndIndx = "<<tmpEndIndx<<std::endl; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); // for (int index = 0; index < divAuxData.tmpNodeType1.size(); index++){ // std::cout<<"divAuxData.tmpNodeType1["<<index<<"] = "<<divAuxData.tmpNodeType1[index]<<std::endl; // } thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2,divAuxData.tmpNodeType1.begin())) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2, divAuxData.tmpNodeType1.begin())) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin(), nodes->getInfoVecs().memNodeType1.begin() )) // the 1 in memNodeType1 is not representing cell number 1 but in the rest it represents + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp1InternalActiveCounts[i]; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"CellRank = "<<cellRank<<"activeIntnlNodeCounts = "<<cellInfoVecs.activeIntnlNodeCounts[cellRank]<<std::endl; // } cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; // if (cellRank 
== 31 || cellRank == 86){ // std::cout<<"CellRank = "<<cellRank<<"activeMembrNodeCounts = "<<cellInfoVecs.activeMembrNodeCounts[cellRank]<<std::endl; // } double leftOrRight = cellInfoVecs.centerCoordX[31]*cellInfoVecs.centerCoordY[cellRank] - cellInfoVecs.centerCoordY[31]*cellInfoVecs.centerCoordX[cellRank]; if (leftOrRight >= 0){ cellInfoVecs.growthProgress[cellRank] = quiescence1_half; //quiescence1*0.5; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } else{ cellInfoVecs.growthProgress[cellRank] = quiescence1; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ nodes->getInfoVecs().quiescencePerNode[i] = cellInfoVecs.growthProgress[cellRank]; } cellInfoVecs.cellAreaGrowthProgress[cellRank] = 0; cellInfoVecs.cellAreaGrowthProgressNonMitotic[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isEnteringMitotic[cellRank] = false; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.distFromBasalLoc[cellRank] = 0.0; cellInfoVecs.distFromApicalLoc[cellRank] = 0.0; cellInfoVecs.daughterCellProduced[cellRank] += 1; // std::cout<<"divAuxData.isMotherCellBehind["<<i<<"] = "<<divAuxData.isMotherCellBehind[i]<<std::endl; //Ali // if (divAuxData.isMotherCellBehind[i]) { if (divAuxData.isMotherCellBehind[i]==true) { //nodes->getInfoVecs().nodeCellRankBehindNeighb[cellRank] =nodes->getInfoVecs().nodeCellRankBehindNeighb[cellRank] ; //as before so no need to update nodes->getInfoVecs().nodeCellRankFront[cellRank] =cellRankDaughter ; int tmpCellRankFront=nodes->getInfoVecs().nodeCellRankFrontOld[cellRank] ; nodes->getInfoVecs().nodeCellRankBehind[tmpCellRankFront] =cellRankDaughter ; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"copy first cell array, cellRank = "<<cellRank<<" , "<<"tmpCellRankFront = "<<tmpCellRankFront<<std::endl; // std::cout<<"1nodeCellRankBehind["<<tmpCellRankFront<<"] = "<<nodes->getInfoVecs().nodeCellRankBehind[tmpCellRankFront]<<std::endl; // for (int j = nodeStartIndx; j < nodeStartIndx+(tmpEndIndx-tmpStartIndx); j++){ // std::cout<<nodes->getInfoVecs().nodeLocX[j]<<" "<<nodes->getInfoVecs().nodeLocY[j]<<" "<<nodes->getInfoVecs().memNodeType1[j]<<std::endl; // } // } } else { nodes->getInfoVecs().nodeCellRankBehind[cellRank] =cellRankDaughter ; // nodes->getInfoVecs().nodeCellRankFrontNeighb[cellRank] = nodes->getInfoVecs().nodeCellRankFrontNeighb[cellRank]; //as before so no need to update int tmpCellRankBehind=nodes->getInfoVecs().nodeCellRankBehindOld[cellRank] ; nodes->getInfoVecs().nodeCellRankFront[tmpCellRankBehind] =cellRankDaughter ; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"cellRank = "<<cellRank<<" , "<<"tmpCellRankBehind = "<<tmpCellRankBehind<<std::endl; // std::cout<<"2nodeCellRankFront["<<tmpCellRankBehind<<"] = "<<nodes->getInfoVecs().nodeCellRankFront[tmpCellRankBehind]<<std::endl; // for (int j = nodeStartIndx; j < nodeStartIndx+(tmpEndIndx-tmpStartIndx); j++){ // std::cout<<nodes->getInfoVecs().nodeLocX[j]<<" "<<nodes->getInfoVecs().nodeLocY[j]<<" "<<nodes->getInfoVecs().memNodeType1[j]<<std::endl; // } // } } cellInfoVecs.numApicalVec[cellRank] = 0; cellInfoVecs.numBasalVec[cellRank] = 0; for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ if 
(nodes->getInfoVecs().memNodeType1[i] == apical1){ cellInfoVecs.numApicalVec[cellRank] += 1; } if (nodes->getInfoVecs().memNodeType1[i] == basal1){ cellInfoVecs.numBasalVec[cellRank] += 1; } } cellInfoVecs.cellRankVec[cellRank] = cellRank; std::cout<<"Cell["<<cellRank<<"] has "<<cellInfoVecs.numApicalVec[cellRank]<<" apical nodes and "<<cellInfoVecs.numBasalVec[cellRank]<<" basal nodes initially post division"<<std::endl; // if (cellRank == 31 || cellRank == 86){ // for (int k = 0; k < maxAllNodePerCell; k++){ // std::cout<<"cellRank = "<<cellRank<<std::endl; // std::cout<<"nodes->getInfoVecs().memNodeType1["<<k+nodeStartIndx<<"] = "<<nodes->getInfoVecs().memNodeType1[k+nodeStartIndx]<<std::endl; // } // } } } void SceCells::copySecondCellArr_M(double quiescence2, double quiescence2_half) { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { int cellRankMother=divAuxData.tmpCellRank_M[i] ; //Ali divAuxData.cellRankMother = divAuxData.tmpCellRank_M[i]; // std::cout<<"cellRankMother = "<<cellRankMother<<std::endl; uint cellRank = allocPara_m.currentActiveCellCount + i; divAuxData.cellRankDaughter = cellRank; // std::cout<<"cellRank = "<<cellRank<<std::endl; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; // std::cout<<"tmpStartIndx for tmpNodeType2 = "<<tmpStartIndx<<std::endl; // std::cout<<"tmpEndIndx for tmpNodeType2 = "<<tmpEndIndx<<std::endl; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); // for (int index = 0; index < divAuxData.tmpNodeType2.size(); index++){ // std::cout<<"divAuxData.tmpNodeType2["<<index<<"] = "<<divAuxData.tmpNodeType2[index]<<std::endl; // } thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2,divAuxData.tmpNodeType2.begin() )) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2,divAuxData.tmpNodeType2.begin())) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin(), nodes->getInfoVecs().memNodeType1.begin())) // 1 is not representing cell 1 + nodeStartIndx); uint cellRankToCopyFrom = cellRankMother * maxAllNodePerCell+ allocPara_m.bdryNodeCount; double actomyoMultipMother = cellInfoVecs.contractActomyo_multip_perCell[cellRankMother]; double actomyoMultipMother_apical = cellInfoVecs.contractActomyo_multip_apical_perCell[cellRankMother]; cellInfoVecs.contractActomyo_multip_perCell[cellRank] = actomyoMultipMother; cellInfoVecs.contractActomyo_multip_apical_perCell[cellRank] = actomyoMultipMother_apical; for (int j = nodeStartIndx; j < nodeStartIndx+maxAllNodePerCell; j++){ nodes->getInfoVecs().contractActomyo_multip[j] = actomyoMultipMother;//nodes->getInfoVecs().contractActomyo_multip[cellRankToCopyFrom]; nodes->getInfoVecs().contractActomyo_multip_apical[j] = actomyoMultipMother_apical;//nodes->getInfoVecs().contractActomyo_multip_apical[cellRankToCopyFrom]; if (j==nodeStartIndx){ std::cout<<"contractActomyo_multip for cellRank = "<<cellRank<<" is "<<nodes->getInfoVecs().contractActomyo_multip[j]<<", 
inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_apical for cellRank = "<<cellRank<<" is "<<nodes->getInfoVecs().contractActomyo_multip_apical[j]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_perCell for cellRank = "<<cellRank<<" is "<<cellInfoVecs.contractActomyo_multip_perCell[cellRank]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_apical_perCell for cellRank = "<<cellRank<<" is "<<cellInfoVecs.contractActomyo_multip_apical_perCell[cellRank]<<", inherited from cellRank = "<<cellRankMother<<std::endl; } if (j==nodeStartIndx+maxAllNodePerCell-1){ std::cout<<"contractActomyo_multip for cellRank = "<<cellRank<<" is "<<nodes->getInfoVecs().contractActomyo_multip[j]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_apical for cellRank = "<<cellRank<<" is "<<nodes->getInfoVecs().contractActomyo_multip_apical[j]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_perCell for cellRank = "<<cellRank<<" is "<<cellInfoVecs.contractActomyo_multip_perCell[cellRank]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_apical_perCell for cellRank = "<<cellRank<<" is "<<cellInfoVecs.contractActomyo_multip_apical_perCell[cellRank]<<", inherited from cellRank = "<<cellRankMother<<std::endl; } } cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp2InternalActiveCounts[i]; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"CellRank = "<<cellRank<<"activeIntnlNodeCounts = "<<cellInfoVecs.activeIntnlNodeCounts[cellRank]<<std::endl;; // } cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp2MemActiveCounts[i]; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"CellRank = "<<cellRank<<"activeMembrNodeCounts = "<<cellInfoVecs.activeMembrNodeCounts[cellRank]<<std::endl;; // } double leftOrRight = cellInfoVecs.centerCoordX[31]*cellInfoVecs.centerCoordY[cellRankMother] - cellInfoVecs.centerCoordY[31]*cellInfoVecs.centerCoordX[cellRankMother]; if (leftOrRight >= 0){ cellInfoVecs.growthProgress[cellRank] = quiescence2_half;//quiescence2*0.5; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } else{ cellInfoVecs.growthProgress[cellRank] = quiescence2; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } // cellInfoVecs.growthProgress[cellRank] = quiescence2; for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ nodes->getInfoVecs().quiescencePerNode[i] = cellInfoVecs.growthProgress[cellRank]; } cellInfoVecs.membrGrowProgress[cellRank] = 0; cellInfoVecs.cellAreaGrowthProgress[cellRank] = 0; cellInfoVecs.cellAreaGrowthProgressNonMitotic[cellRank] = 0; cellInfoVecs.isEnteringMitotic[cellRank] = false; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.cellRoot[cellRank] = cellInfoVecs.cellRoot[cellRankMother]; //Ali cellInfoVecs.eCellTypeV2[cellRank] = cellInfoVecs.eCellTypeV2[cellRankMother]; //Ali cellInfoVecs.distFromNucleus_normal[cellRank] = cellInfoVecs.distFromNucleus_normal[cellRankMother]; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = cellInfoVecs.distFromNucleus_normal_apical[cellRankMother]; cellInfoVecs.distFromBasalLoc[cellRank] = 0.0; cellInfoVecs.distFromApicalLoc[cellRank] = 0.0; 
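// Descriptive note (added, hedged): by this point the second daughter (a brand-new cell rank appended after the currently
// active cells) appears to have inherited the mother's actomyosin contractility multipliers, cellRoot, eCellTypeV2 and
// distFromNucleus_normal(_apical) values, while its growth, membrane-growth and area-growth progress flags have been
// reset; the front/behind neighbor ranks assigned just below mirror the rewiring done in copyFirstCellArr_M.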
std::cout<<"New Cell "<<cellRank<<" introduced!"<<std::endl; std::cout<<"distFromNucleus_normal : "<<cellInfoVecs.distFromNucleus_normal[cellRank]<<std::endl; std::cout<<"distFromNucleus_normal_apical : "<<cellInfoVecs.distFromNucleus_normal_apical[cellRank]<<std::endl; std::cout<<"isEnteringMitotic[cellRank] : "<<cellInfoVecs.isEnteringMitotic[cellRank]<<std::endl; //Ali // std::cout<<"divAuxData.isMotherCellBehind["<<i<<"] = "<<divAuxData.isMotherCellBehind[i]<<std::endl; if (divAuxData.isMotherCellBehind[i]==true) { nodes->getInfoVecs().nodeCellRankBehind[cellRank] =cellRankMother ; nodes->getInfoVecs().nodeCellRankFront[cellRank] =nodes->getInfoVecs().nodeCellRankFrontOld[cellRankMother]; } else { nodes->getInfoVecs().nodeCellRankBehind[cellRank] =nodes->getInfoVecs().nodeCellRankBehindOld[cellRankMother]; nodes->getInfoVecs().nodeCellRankFront[cellRank] =cellRankMother ; } cellInfoVecs.numApicalVec[cellRank] = 0; cellInfoVecs.numBasalVec[cellRank] = 0; for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ if (nodes->getInfoVecs().memNodeType1[i] == apical1){ cellInfoVecs.numApicalVec[cellRank] += 1; } if (nodes->getInfoVecs().memNodeType1[i] == basal1){ cellInfoVecs.numBasalVec[cellRank] += 1; } } cellInfoVecs.cellRankVec[cellRank] = cellRank; std::cout<<"Cell["<<cellRank<<"] has "<<cellInfoVecs.numApicalVec[cellRank]<<" apical nodes and "<<cellInfoVecs.numBasalVec[cellRank]<<" basal nodes initially post division"<<std::endl; } } //AAMIRI /* void SceCells::removeCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp1InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; } } */ void SceCells::updateActiveCellCount_M() { allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount + divAuxData.toBeDivideCount; nodes->setActiveCellCount(allocPara_m.currentActiveCellCount); } //AAMIRI /* void SceCells::updateActiveCellCountAfterRemoval_M() { allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount + divAuxData.toBeDivideCount; nodes->setActiveCellCount(allocPara_m.currentActiveCellCount); } */ void SceCells::markIsDivideFalse_M() { // for (int i = 0; i < cellInfoVecs.isDividing.size(); i++){ // if (cellInfoVecs.isDividing[i] == 
true){ // cellInfoVecs.growthProgress[i] = 0.0; // cellInfoVecs.cellAreaGrowthProgress[i] = 0.0; // cellInfoVecs.cellAreaGrowthProgressNonMitotic[i] = 0.0; // cellInfoVecs.isEnteringMitotic[i] = false; // } // } thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, false); } void SceCells::adjustNodeVel_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + allocPara_m.bdryNodeCount + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), ForceZero()); } void SceCells::moveNodes_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), //Ali SaxpyFunctorDim2(dt)); SaxpyFunctorDim2_Damp(dt,Damp_Coef)); //Ali } //Ali // This function is written to assigned different damping coefficients to cells, therefore the boundary cells can have more damping void SceCells::moveNodes_BC_M() { thrust::counting_iterator<uint> iBegin2(0); uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.Cell_Damp.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.Cell_Damp.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2_BC_Damp(dt)); } //Ali void SceCells::ApplyExtForces() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; //for (int i=0 ; i <nodes->getInfoVecs().memNodeType1.size(); i++ ) { // if (nodes->getInfoVecs().memNodeType1[i]==basal1) { // cout << " I am a basal node with id="<< i << " and vx before applying external force is equal to " <<nodes->getInfoVecs().nodeVelX[i] << endl ; // } //} thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( 
nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceY.begin())), AddExtForces(curTime)); //for (int i=0 ; i <nodes->getInfoVecs().memNodeType1.size(); i++ ) { // if (nodes->getInfoVecs().memNodeType1[i]==basal1) { // cout << " I am a basal node with id="<< i << " and vx is equal to " <<nodes->getInfoVecs().nodeVelX[i] << endl ; // } //} } void SceCells::applyMemForce_M(bool cellPolar,bool subCellPolar) { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0), iBegin2(0) ; //Ali thrust::fill(cellInfoVecs.Cell_Time.begin(),cellInfoVecs.Cell_Time.begin() +allocPara_m.currentActiveCellCount,curTime); //Ali thrust::device_vector<double>::iterator MinY_Itr_Cell=thrust::min_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxY_Itr_Cell=thrust::max_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; double minY_Cell= *MinY_Itr_Cell ; //This variable doesn't seemed to be used even when passed into functions //Kevin double maxY_Cell= *MaxY_Itr_Cell ; //This variable doesn't seemed to be used even when passed into functions //Kevin double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); int* nodeAdhereIndexAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[0])); //assuming that number of boundary nodes are equal to zero int* cellRootAddr = thrust::raw_pointer_cast( &(cellInfoVecs.cellRoot[0])); // Ali // if (curTime>10.05) { // for (int i=0; i<nodes->getInfoVecs().nodeAdhereIndex.size(); i++) { // cout<<"node adhere index"<<i+allocPara_m.bdryNodeCount<<" is" <<nodes->getInfoVecs().nodeAdhereIndex[i]<<endl ; // } // exit (EXIT_FAILURE) ; // } //double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.eCellTypeV2.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().isSubApicalJunction.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin2, ModuloFunctor(maxAllNodePerCell)))), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.eCellTypeV2.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( 
cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().isSubApicalJunction.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin2, ModuloFunctor(maxAllNodePerCell)))) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeActinLevel.begin(), ActinLevelCal(maxAllNodePerCell,nodeIsActiveAddr,cellRootAddr,minY_Cell,maxY_Cell,cellPolar,subCellPolar)); //double a ; //for(int i=0 ; i<totalNodeCountForActiveCells ; i++) { // a=static_cast<double>(nodes->getInfoVecs().nodeAdhereIndex[i]-i); // cout<< "adhere index of node " << i << " is " << nodes->getInfoVecs().nodeAdhereIndex[i] << endl ; // cout<< "the normalized difference is" <<a/(2.0*680) <<"the difference is " << a << "2 time max node per cell is " << 2*maxAllNodePerCell << endl ; // } double* nodeActinLevelAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeActinLevel[0])); //assuming that number of boundary nodes are equal to zero thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeAdhereIndex.begin() + allocPara_m.bdryNodeCount, make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeAdhereIndex.begin() + allocPara_m.bdryNodeCount, make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().membrTenMagRi.begin(), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrBendLeftX.begin(), nodes->getInfoVecs().membrBendLeftY.begin(), nodes->getInfoVecs().membrBendRightX.begin(), nodes->getInfoVecs().membrBendRightY.begin())) + allocPara_m.bdryNodeCount, 
AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, nodeAdhereIndexAddr,nodeActinLevelAddr, grthPrgrCriVal_M,minY_Cell,maxY_Cell)); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrLinSpringEnergy.begin(), nodes->getInfoVecs().membrBendSpringEnergy.begin())), CalMembrEnergy(maxAllNodePerCell,nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr,nodeActinLevelAddr, grthPrgrCriVal_M)); energyCell.totalMembrLinSpringEnergyCell=0.5 *(thrust::reduce ( nodes->getInfoVecs().membrLinSpringEnergy.begin(), nodes->getInfoVecs().membrLinSpringEnergy.begin()+totalNodeCountForActiveCells, (double)0.0, thrust::plus<double>() )); energyCell.totalMembrBendSpringEnergyCell=thrust::reduce ( nodes->getInfoVecs().membrBendSpringEnergy.begin(), nodes->getInfoVecs().membrBendSpringEnergy.begin()+totalNodeCountForActiveCells, (double)0.0, thrust::plus<double>() ); energyCell.totalNodeIIEnergyCell=0.5*(thrust::reduce ( nodes->getInfoVecs().nodeIIEnergy.begin(), nodes->getInfoVecs().nodeIIEnergy.begin()+totalNodeCountForActiveCells, (double)0.0, thrust::plus<double>() )); energyCell.totalNodeIMEnergyCell=0.5*(thrust::reduce ( nodes->getInfoVecs().nodeIMEnergy.begin(), nodes->getInfoVecs().nodeIMEnergy.begin()+totalNodeCountForActiveCells, (double)0.0, thrust::plus<double>() )); energyCell.totalNodeEnergyCellOld=energyCell.totalNodeEnergyCell ; energyCell.totalNodeEnergyCell=energyCell.totalMembrLinSpringEnergyCell + energyCell.totalMembrBendSpringEnergyCell + energyCell.totalNodeIIEnergyCell + energyCell.totalNodeIMEnergyCell ; int timeStep=curTime/dt ; if ( (timeStep % 10000)==0 ) { string uniqueSymbolOutput = globalConfigVars.getConfigValue("UniqueSymbol").toString(); std::string cSVFileName = "EnergyExportCell_" + uniqueSymbolOutput + ".CSV"; ofstream EnergyExportCell ; EnergyExportCell.open(cSVFileName.c_str(),ofstream::app); EnergyExportCell <<curTime<<","<<energyCell.totalMembrLinSpringEnergyCell << "," <<energyCell.totalMembrBendSpringEnergyCell << "," <<energyCell.totalNodeIIEnergyCell<<"," <<energyCell.totalNodeIMEnergyCell<<", "<< energyCell.totalNodeEnergyCell <<std::endl; } double* bendLeftXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftX[0])); double* bendLeftYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftY[0])); double* bendRightXAddr = thrust::raw_pointer_cast( 
&(nodes->getInfoVecs().membrBendRightX[0])); double* bendRightYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendRightY[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), AddMembrBend(maxAllNodePerCell, nodeIsActiveAddr, bendLeftXAddr, bendLeftYAddr, bendRightXAddr, bendRightYAddr)); } //AAMIRI void SceCells::findTangentAndNormal_M() { uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeInterCellForceX.begin(), nodes->getInfoVecs().nodeInterCellForceY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeInterCellForceX.begin(), nodes->getInfoVecs().nodeInterCellForceY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeF_MI_M_T.begin(), nodes->getInfoVecs().nodeF_MI_M_N.begin(), //Absoulte value since we know it is always repulsion. 
only it is used for output data nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeInterCellForceTangent.begin(), nodes->getInfoVecs().nodeInterCellForceNormal.begin(), // Absolute value to be consittent only it is used for output data nodes->getInfoVecs().membrDistToRi.begin())), CalCurvatures(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)); } void SceCells::runAblationTest(AblationEvent& ablEvent) { for (uint i = 0; i < ablEvent.ablationCells.size(); i++) { int cellRank = ablEvent.ablationCells[i].cellNum; std::vector<uint> removeSeq = ablEvent.ablationCells[i].nodeNums; cellInfoVecs.activeNodeCountOfThisCell[cellRank] = cellInfoVecs.activeNodeCountOfThisCell[cellRank] - removeSeq.size(); nodes->removeNodes(cellRank, removeSeq); } } void SceCells::computeInternalAvgPos_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); //uint totalMembrActiveNodeCount = thrust::reduce( // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.activeMembrNodeCounts.begin() // + allocPara_m.currentActiveCellCount); uint totalIntnlActiveNodeCount = thrust::reduce( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin())) + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), ActiveAndIntnl()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), thrust::equal_to<uint>(), CVec2Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.activeIntnlNodeCounts.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), CVec2Divide()); } void SceCells::computeInternalMaxMinPos_M() { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // thrust::counting_iterator<uint> iBegin(0); // thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); // //uint totalMembrActiveNodeCount = 
thrust::reduce( // // cellInfoVecs.activeMembrNodeCounts.begin(), // // cellInfoVecs.activeMembrNodeCounts.begin() // // + allocPara_m.currentActiveCellCount); // uint totalIntnlActiveNodeCount = thrust::reduce( // cellInfoVecs.activeIntnlNodeCounts.begin(), // cellInfoVecs.activeIntnlNodeCounts.begin() // + allocPara_m.currentActiveCellCount); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple( // make_transform_iterator(iBegin, // DivideFunctor( // allocPara_m.maxAllNodePerCell)), // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount)), // thrust::make_zip_iterator( // thrust::make_tuple( // make_transform_iterator(iBegin, // DivideFunctor( // allocPara_m.maxAllNodePerCell)), // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount)) // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeCellType.begin())) // + allocPara_m.bdryNodeCount, // thrust::make_zip_iterator( // thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), // cellNodeInfoVecs.activeXPoss.begin(), // cellNodeInfoVecs.activeYPoss.begin())), // ActiveAndIntnl()); // thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), // cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount, // thrust::make_zip_iterator( // thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), // cellNodeInfoVecs.activeYPoss.begin())), // cellInfoVecs.cellRanksTmpStorage.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), // cellInfoVecs.InternalAvgY.begin())), // thrust::equal_to<uint>(), CVec2Add()); // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), // cellInfoVecs.InternalAvgY.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), // cellInfoVecs.InternalAvgY.begin())) // + allocPara_m.currentActiveCellCount, // cellInfoVecs.activeIntnlNodeCounts.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), // cellInfoVecs.InternalAvgY.begin())), CVec2Divide()); } void SceCells::applyVolumeConstraint(double timeRatio, double timeRatio_Crit_Division, double volume_Increase_Target_Ratio, double volume_Increase_Scale, double postDivision_restorationRateScale, int cycle) { calCellArea(); // for (int k = 0; k < cellInfoVecs.cellAreaVec.size(); k++){ // if (cellInfoVecs.cellAreaVec[k] < 0){ // cellInfoVecs.cellAreaVec[k] = -1.0*cellInfoVecs.cellAreaVec[k]; // } // } // !!!!!NOTE!!!!!! this is currently an ad hoc way to make sure area of each triangle is positive. 
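// Added note (sketch, not part of the build): the NOTE above mentions an ad hoc fix for negative triangle areas. The usual
// remedy is to accumulate the signed (shoelace) area per cell and take its absolute value once, which is independent of
// the node winding order; a minimal illustration with hypothetical local names:
//   double signedArea = 0.0;
//   for (uint j = 0; j < nMembrNodes; j++) {
//       uint k = (j + 1) % nMembrNodes;              // next membrane node, wrapping around
//       signedArea += x[j] * y[k] - x[k] * y[j];     // cross product of consecutive node positions
//   }
//   double cellArea = 0.5 * fabs(signedArea);        // positive regardless of orientation
// calCellArea() itself is defined elsewhere in this class; this is only a sketch of the idea.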
if (timeRatio == timeRatio_Crit_Division || timeRatio == timeRatio_Crit_Division+0.2){ std::cout<<"Current timeRatio = "<<timeRatio<<std::endl; for (int k = 0; k < cellInfoVecs.cellAreaVec.size(); k++){ std::cout<<"Cell["<<k<<"] area = "<<cellInfoVecs.cellAreaVec[k]<<std::endl; std::cout<<"CellCenter = "<<cellInfoVecs.centerCoordX[k]<<" "<<cellInfoVecs.centerCoordY[k]<<std::endl; } } if (timeRatio > timeRatio_Crit_Division && nodes->isCellAreaDisplayed==false){ for (int k = 0; k < cellInfoVecs.cellAreaVec.size(); k++){ // if (k == 31 || k == 86){ // std::cout<<"Cell["<<k<<"] area = "<<cellInfoVecs.cellAreaVec[k]<<std::endl; // std::cout<<"CellCenter = "<<cellInfoVecs.centerCoordX[k]<<" "<<cellInfoVecs.centerCoordY[k]<<std::endl; // } } nodes->isCellAreaDisplayed=true; } // computeLagrangeForces(); // if (timeRatio >= timeRatio_Crit_Division){ // double cellAreaDesire=(cellInfoVecs.cellAreaVec[divAuxData.cellRank_division]) + (65 - cellInfoVecs.cellAreaVec[divAuxData.cellRank_division])*postDivision_restorationRateScale*(timeRatio - timeRatio_Crit_Division )/(1.0 - timeRatio_Crit_Division); // double cellAreaDesire2=(cellInfoVecs.cellAreaVec[divAuxData.cellRank_division2]) + (65 - cellInfoVecs.cellAreaVec[divAuxData.cellRank_division2])*postDivision_restorationRateScale*(timeRatio - timeRatio_Crit_Division )/(1.0 - timeRatio_Crit_Division); // std::cout<<"cellAreaCurrent["<<divAuxData.cellRank_division<<"] = "<<cellInfoVecs.cellAreaVec[divAuxData.cellRank_division]<<" , "<<"cellAreaCurrent["<<divAuxData.cellRank_division2<<"] = "<<cellInfoVecs.cellAreaVec[divAuxData.cellRank_division2]<<std::endl; // std::cout<<"cellAreaDesire["<<divAuxData.cellRank_division<<"] = "<<cellAreaDesire<<" , "<<"cellAreaDesire["<<divAuxData.cellRank_division2<<"] = "<<cellAreaDesire2<<std::endl; // } computeLagrangeForces(timeRatio, volume_Increase_Target_Ratio); } void SceCells::computeCenterPos_M2() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); uint totalMembrActiveNodeCount = thrust::reduce( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeMembrNodeCounts.begin() + allocPara_m.currentActiveCellCount); //uint totalIntnlActiveNodeCount = thrust::reduce( // cellInfoVecs.activeIntnlNodeCounts.begin(), // cellInfoVecs.activeIntnlNodeCounts.begin() // + allocPara_m.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin())) + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), ActiveAndMembr()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalMembrActiveNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), 
cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::equal_to<uint>(), CVec2Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.activeMembrNodeCounts.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), CVec2Divide()); /* for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { cout << "for cell rank "<<i<< " cell center in X direction is " << cellInfoVecs.centerCoordX[i] << endl ; cout << "for cell rank "<<i<< " cell center in Y direction is " << cellInfoVecs.centerCoordY[i] << endl ; } */ } void SceCells::computeLagrangeForces(double timeRatio, double volume_Increase_Target_Ratio) { uint maxMembrNode = allocPara_m.maxMembrNodePerCell; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0) ; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double* cellAreaVecAddr= thrust::raw_pointer_cast( &(cellInfoVecs.cellAreaVec[0])); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; ECellType* eCellTypeV2Addr= thrust::raw_pointer_cast( &(cellInfoVecs.eCellTypeV2[0])); bool* isEnteringMitotic = thrust::raw_pointer_cast( &(cellInfoVecs.isEnteringMitotic[0])); double* cellAreaGrowthProgress = thrust::raw_pointer_cast( &(cellInfoVecs.cellAreaGrowthProgress[0])); double* cellAreaGrowthProgressNonMitotic = thrust::raw_pointer_cast( &(cellInfoVecs.cellAreaGrowthProgressNonMitotic[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( 
cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().lagrangeFX.begin(), nodes->getInfoVecs().lagrangeFY.begin(), nodes->getInfoVecs().lagrangeFN.begin())), AddLagrangeForces(maxAllNodePerCell,nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr,cellAreaVecAddr,grthPrgrCriVal_M, eCellTypeV2Addr, timeRatio, volume_Increase_Target_Ratio, isEnteringMitotic, cellAreaGrowthProgress, cellAreaGrowthProgressNonMitotic)); uint maxNPerCell = allocPara_m.maxAllNodePerCell; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin2(0); thrust::reduce_by_key( make_transform_iterator(iBegin2, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin2, DivideFunctor(maxNPerCell))+ totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().lagrangeFX.begin(), nodes->getInfoVecs().lagrangeFY.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.sumLagrangeFPerCellX.begin(), cellInfoVecs.sumLagrangeFPerCellY.begin())), thrust::equal_to<uint>(), CVec2Add()); /* for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { cout << "for cell rank "<<i<< " the summation of lagrangian force in X direction is " << cellInfoVecs.sumLagrangeFPerCellX[i] << endl ; cout << "for cell rank "<<i<< " the summation of lagrangian force in Y direction is " << cellInfoVecs.sumLagrangeFPerCellY[i] << endl ; } */ } void SceCells::computeContractileRingForces() { uint maxMembrNode = allocPara_m.maxMembrNodePerCell; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0) ; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.ringApicalId.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.ringBasalId.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.ringApicalId.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), 
thrust::make_permutation_iterator( cellInfoVecs.ringBasalId.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), AddContractileRingForces(maxAllNodePerCell,nodeLocXAddr, nodeLocYAddr, grthPrgrCriVal_M)); } void SceCells::BC_Imp_M() { thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; double MinX= *MinX_Itr ; double MaxX= *MaxX_Itr ; double MinY= *MinY_Itr ; double MaxY= *MaxY_Itr ; /** thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin()) ), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), BC_Tissue_Damp(Damp_Coef)) ; **/ int NumActCells=allocPara_m.currentActiveCellCount ; //Ali thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.Cell_Damp.begin()) ), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.Cell_Damp.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.Cell_Damp.begin())), BC_Tissue_Damp(MinX,MaxX,MinY,MaxY,Damp_Coef,NumActCells)) ; /**void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } **/ } void SceCells::assignMemNodeType() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = 
			allocPara_m.maxAllNodePerCell;
	thrust::counting_iterator<uint> iBegin2(0);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().memNodeType1.begin(),
							make_transform_iterator(iBegin2,
									ModuloFunctor(maxAllNodePerCell)),
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin2,
											DivideFunctor(maxAllNodePerCell))))),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().memNodeType1.begin(),
							make_transform_iterator(iBegin2,
									ModuloFunctor(maxAllNodePerCell)),
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin2,
											DivideFunctor(maxAllNodePerCell)))))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().memNodeType1.begin(),
							nodes->getInfoVecs().nodeIsApicalMem.begin(),
							nodes->getInfoVecs().nodeIsBasalMem.begin())),
			AssignMemNodeType());
}

// This function is written with the assumption that there is at least one basal point for each cell.
void SceCells::computeBasalLoc() {
	uint maxNPerCell = allocPara_m.maxAllNodePerCell;
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
	int* basalNodeCountAddr = thrust::raw_pointer_cast(
			&(cellInfoVecs.basalNodeCount[0]));

	// Count the basal membrane nodes of each cell.
	thrust::reduce_by_key(
			make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)),
			make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))
					+ totalNodeCountForActiveCells,
			nodes->getInfoVecs().nodeIsBasalMem.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.basalNodeCount.begin(), thrust::equal_to<uint>(),
			thrust::plus<int>());
	uint totalBasalNodeCount = thrust::reduce(
			cellInfoVecs.basalNodeCount.begin(),
			cellInfoVecs.basalNodeCount.begin()
					+ allocPara_m.currentActiveCellCount);

	// Compact the coordinates of active basal nodes, keyed by cell rank.
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(iBegin,
									DivideFunctor(allocPara_m.maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(iBegin,
									DivideFunctor(allocPara_m.maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin()))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().memNodeType1.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(),
							cellNodeInfoVecs.activeLocXBasal.begin(),
							cellNodeInfoVecs.activeLocYBasal.begin())),
			ActiveAndBasal());

	// Sum the basal node coordinates of each cell.
	thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(),
			cellNodeInfoVecs.cellRanks.begin() + totalBasalNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.activeLocXBasal.begin(),
							cellNodeInfoVecs.activeLocYBasal.begin())),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.basalLocX.begin(),
							cellInfoVecs.basalLocY.begin())),
			thrust::equal_to<uint>(), CVec2Add());
	// Up to here basalLocX and basalLocY hold the per-cell sums. We divide them by the
	// basal node counts below.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.basalLocX.begin(),
							cellInfoVecs.basalLocY.begin(),
							cellInfoVecs.cellRanksTmpStorage.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.basalLocX.begin(),
							cellInfoVecs.basalLocY.begin(),
							cellInfoVecs.cellRanksTmpStorage.begin()))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.basalLocX.begin(),
							cellInfoVecs.basalLocY.begin())),
			BasalLocCal(basalNodeCountAddr));
}

void SceCells::computeApicalLoc(double timeRatio,
		double timeRatio_Crit_Division) {
	uint maxNPerCell = allocPara_m.maxAllNodePerCell;
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	// std::cout<<"AHAHA 1"<<std::endl;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
	// int* apicalNodeCountAddr = thrust::raw_pointer_cast(
	//		&(cellInfoVecs.apicalNodeCount[0]));
	// std::cout<<"AHAHA 2"<<std::endl;

	// Count the apical membrane nodes of each cell.
	thrust::reduce_by_key(
			make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)),
			make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))
					+ totalNodeCountForActiveCells,
			nodes->getInfoVecs().nodeIsApicalMem.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.apicalNodeCount.begin(), thrust::equal_to<uint>(),
			thrust::plus<int>());
	int* apicalNodeCountAddr = thrust::raw_pointer_cast(
			&(cellInfoVecs.apicalNodeCount[0]));
	// std::cout<<"AHAHA 2.5"<<std::endl;
	int sizeApical = cellInfoVecs.apicalNodeCount.size();
	// std::cout<<"AHAHA 3"<<std::endl;
	uint totalApicalNodeCount = thrust::reduce(
			cellInfoVecs.apicalNodeCount.begin(),
			cellInfoVecs.apicalNodeCount.begin()
					+ allocPara_m.currentActiveCellCount);
	// std::cout<<"AHAHA 4"<<std::endl;

	// Compact the coordinates of active apical nodes, keyed by cell rank.
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(iBegin,
									DivideFunctor(allocPara_m.maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(iBegin,
									DivideFunctor(allocPara_m.maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin()))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().memNodeType1.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(),
							cellNodeInfoVecs.activeLocXApical.begin(),
							cellNodeInfoVecs.activeLocYApical.begin())),
			ActiveAndApical());
	//for (int i=sizeApical-40 ; i<sizeApical ; i++) {
	//	cout << " the location of apical node " << i << " is "<<cellNodeInfoVecs.activeLocXApical[i] << " and " << cellNodeInfoVecs.activeLocYApical[i] << endl ;
	//}
	// std::cout<<"AHAHA 5"<<std::endl;

	// Sum the apical node coordinates of each cell.
	thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(),
			cellNodeInfoVecs.cellRanks.begin() + totalApicalNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.activeLocXApical.begin(),
							cellNodeInfoVecs.activeLocYApical.begin())),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin())),
			thrust::equal_to<uint>(), CVec2Add());
	// Up to here apicalLocX and apicalLocY hold the per-cell sums. We divide them if at
	// least one apical node exists; a (0,0) apical location indicates that the cell has
	// no apical node.
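	// Note: the averaging here follows the same two-step pattern used elsewhere in this
	// file: reduce_by_key sums a per-node quantity into one value per cell rank, and a
	// transform then divides each sum by the matching per-cell count. A minimal sketch of
	// the idea with hypothetical vectors (perNodeValue, perCellSum, perCellCount,
	// perCellAvg), assuming maxNPerCell node slots per cell:
	//
	//	thrust::counting_iterator<uint> first(0);
	//	thrust::reduce_by_key(
	//			make_transform_iterator(first, DivideFunctor(maxNPerCell)),
	//			make_transform_iterator(first, DivideFunctor(maxNPerCell))
	//					+ totalNodeCountForActiveCells,
	//			perNodeValue.begin(),                      // values to sum per cell
	//			cellInfoVecs.cellRanksTmpStorage.begin(),  // one key out per cell
	//			perCellSum.begin());                       // one sum per cell rank
	//	thrust::transform(perCellSum.begin(),
	//			perCellSum.begin() + allocPara_m.currentActiveCellCount,
	//			perCellCount.begin(), perCellAvg.begin(),
	//			thrust::divides<double>());                // average = sum / count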
/* // I comment this section since for now all the cells have apical node // // special consideration for the cells with no apical nodes int NumCellsWithApicalNode=0 ; for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { if (cellInfoVecs.apicalNodeCount[i]!=0) { NumCellsWithApicalNode=NumCellsWithApicalNode +1; } } */ //finish commenting speical consideration for the cells with no apical node //simply these two are equal int NumCellsWithApicalNode=allocPara_m.currentActiveCellCount ; // // std::cout<<"AHAHA 6"<<std::endl; //cout << "num of cells with apical node is " << NumCellsWithApicalNode << endl ; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin(), cellInfoVecs.cellRanksTmpStorage.begin() )), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin(), cellInfoVecs.cellRanksTmpStorage.begin() )) + NumCellsWithApicalNode, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())), ApicalLocCal(apicalNodeCountAddr)); /* I comment this section since for this simulation all the cells have apical node // start special consideration for the cells which have no apical node //reargment to also include the cell which have not apical cells and assign the location for them as 0,0 for (int i=0 ; i<allocPara_m.currentActiveCellCount-1 ; i++) { // if the cell with 0 apical node is at the end, we are fine. if (cellInfoVecs.apicalNodeCount[i]==0) { cout << " I am inside complicated loop" << endl ; for (int j=allocPara_m.currentActiveCellCount-2 ; j>=i ; j--) { cellInfoVecs.apicalLocX[j+1]=cellInfoVecs.apicalLocX[j] ; cellInfoVecs.apicalLocY[j+1]=cellInfoVecs.apicalLocY[j] ; } cellInfoVecs.apicalLocX[i]=0 ; cellInfoVecs.apicalLocY[i]=0 ; } } if (cellInfoVecs.apicalNodeCount[allocPara_m.currentActiveCellCount-1]==0) { // if the cell with 0 apical node is at the end, no rearrngment is required cellInfoVecs.apicalLocX[allocPara_m.currentActiveCellCount-1]=0 ; cellInfoVecs.apicalLocY[allocPara_m.currentActiveCellCount-1]=0 ; } // finish special consideration for the cells that have not apical nodes */ // if (timeRatio == timeRatio_Crit_Division){ // std::cout<<"totalNodeCountForActiveCells = "<<totalNodeCountForActiveCells<<std::endl; // std::cout<<"nodes->getInfoVecs().nodeIsApicalMem = "<<nodes->getInfoVecs().nodeIsApicalMem.size()<<std::endl; // for (int i = 0; i < nodes->getInfoVecs().nodeIsApicalMem.size(); i++){ // if (i == 0){ // std::cout<<"i = "<<i<<", "<<nodes->getInfoVecs().nodeIsApicalMem[i]<<std::endl; // } // else if (i == nodes->getInfoVecs().nodeIsApicalMem.size()-1){ // std::cout<<"i = "<<i<<", "<<nodes->getInfoVecs().nodeIsApicalMem[i]<<std::endl; // } // else{continue;} // } // std::cout<<"cellInfoVecs.cellRanksTmpStorage = "<<cellInfoVecs.cellRanksTmpStorage.size()<<std::endl; // for (int i = 0; i < cellInfoVecs.cellRanksTmpStorage.size(); i++){ // if (i == 0){ // std::cout<<"i = "<<i<<", "<<cellInfoVecs.cellRanksTmpStorage[i]<<std::endl; // } // else if (i == cellInfoVecs.cellRanksTmpStorage.size()-1){ // std::cout<<"i = "<<i<<", "<<cellInfoVecs.cellRanksTmpStorage[i]<<std::endl; // } // else{continue;} // } // std::cout<<"cellInfoVecs.apicalNodeCount = "<<cellInfoVecs.apicalNodeCount.size()<<std::endl; // for (int i = 0; i < cellInfoVecs.apicalNodeCount.size(); i++){ // if (i == 0){ // std::cout<<"i = "<<i<<", "<<cellInfoVecs.apicalNodeCount[i]<<std::endl; // } // else if (i == 
cellInfoVecs.apicalNodeCount.size()-1){ // std::cout<<"i = "<<i<<", "<<cellInfoVecs.apicalNodeCount[i]<<std::endl; // } // else{continue;} // } // } } // this function is not currently active. It is useful when the level of growth needs to be related to nucleus location. void SceCells::computeNucleusLoc() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.nucleusLocX.begin(), cellInfoVecs.nucleusLocY.begin())), CalNucleusLoc()); //for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { // cout << "for cell rank "<< i << " Cell progress is " << cellInfoVecs.growthProgress[i] << endl ; // cout << "for cell rank "<< i << " Nucleus location in X direction is " << cellInfoVecs.nucleusLocX[i] <<" in Y direction is " << cellInfoVecs.nucleusLocY[i] << endl ; // cout << "for cell rank "<< i << " apical location in X direction is " << cellInfoVecs.apicalLocX[i] <<" in Y direction is " << cellInfoVecs.apicalLocY[i] << endl ; // cout << "for cell rank "<< i << " center location in X direction is " << cellInfoVecs.centerCoordX[i] <<" in Y direction is " << cellInfoVecs.centerCoordY[i] << endl ; //} } void SceCells::computeIndividualCellHeight(double distFromNucleus_normalMax1,double distFromNucleus_normalMax2,double distFromNucleus_normalMax3, double distFromNucleus_normalMax_apical1, double distFromNucleus_normalMax_apical2, double distFromNucleus_normalMax_apical3){ // double* individualCellHeightPreMitotic = thrust::raw_pointer_cast(&(cellInfoVecs.individualCellHeight[0])); thrust::counting_iterator<uint> iBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin(), cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocY.begin(), cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.individualCellHeight.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin(), cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocY.begin(), cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.individualCellHeight.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.individualCellHeight.begin(), cellInfoVecs.distFromNucleus_normal.begin(), cellInfoVecs.distFromNucleus_normal_apical.begin())), CalCellHeight( distFromNucleus_normalMax1, distFromNucleus_normalMax2, distFromNucleus_normalMax3, distFromNucleus_normalMax_apical1, distFromNucleus_normalMax_apical2, distFromNucleus_normalMax_apical3)); } void SceCells::computeIndividualCellHeight_Ver2(){ // double* individualCellHeightPreMitotic = thrust::raw_pointer_cast(&(cellInfoVecs.individualCellHeight[0])); thrust::counting_iterator<uint> iBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin(), cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocY.begin(), cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.individualCellHeight.begin())), thrust::make_zip_iterator( 
thrust::make_tuple(iBegin, cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin(), cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocY.begin(), cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.individualCellHeight.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.individualCellHeight.begin(), CalCellHeight_Ver2()); } void SceCells::computeNucleusDesireLoc() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.nucleusLocPercent.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.nucleusLocPercent.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.nucleusDesireLocX.begin(), cellInfoVecs.nucleusDesireLocY.begin(), cellInfoVecs.nucDesireDistApical.begin())), CalNucleusDesireLoc()); } void SceCells::computeCellCenterPerturbedLoc() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.nucleusLocPercent.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.nucleusLocPercent.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.cellCenterPerturbedLocLocX.begin(), cellInfoVecs.cellCenterPerturbedLocLocY.begin(), cellInfoVecs.cellCenterDesireDistApical.begin())), CalCellCenterPerturbedLoc()); } void SceCells::computeNucleusIniLocPercent() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgY.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.apicalLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgY.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.apicalLocY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.nucleusLocPercent.begin(), CalNucleusIniLocPercent()); /* for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { cout << "for cell rank "<< i << " nucleus cell percent is " << cellInfoVecs.nucleusLocPercent[i] << endl ; cout << "for cell rank "<< i << " cell center in Y direction is " << cellInfoVecs.centerCoordY[i] << endl ; cout << "for cell rank "<< i << " apical location in Y direction is " << cellInfoVecs.apicalLocY[i] << endl ; cout << "for cell rank "<< i << " Internal average in Y direction is " << cellInfoVecs.InternalAvgY[i] << endl ; } */ } // this function is not currently active. It is used when 1) internal nodes are used to represent the nucleus 2) we wanted to force the internal nodes to be at desired location. The problem with this method is that it will create net unphysical force on the cell. 
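// Note on the indexing used throughout this file: nodes are stored in a flat array with
// maxAllNodePerCell slots per cell, so a node's cell rank is nodeIdx / maxAllNodePerCell
// (DivideFunctor) and its index within the cell is nodeIdx % maxAllNodePerCell
// (ModuloFunctor). Per-cell quantities are broadcast to every node of a cell by feeding
// the cell-rank iterator into a permutation_iterator. A minimal sketch, assuming a
// hypothetical per-node output vector perNodeCenterX:
//
//	thrust::counting_iterator<uint> nodeIdx(0);
//	thrust::copy(
//			thrust::make_permutation_iterator(cellInfoVecs.centerCoordX.begin(),
//					make_transform_iterator(nodeIdx, DivideFunctor(maxAllNodePerCell))),
//			thrust::make_permutation_iterator(cellInfoVecs.centerCoordX.begin(),
//					make_transform_iterator(nodeIdx, DivideFunctor(maxAllNodePerCell)))
//					+ totalNodeCountForActiveCells,
//			perNodeCenterX.begin());   // every node now sees its own cell's center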
void SceCells::updateInternalAvgPosByNucleusLoc_M () { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin(), cellInfoVecs.nucleusLocX.begin(), cellInfoVecs.nucleusLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin(), cellInfoVecs.nucleusLocX.begin(), cellInfoVecs.nucleusLocY.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.tmpShiftVecX.begin(), cellInfoVecs.tmpShiftVecY.begin())), CalShiftVec()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.tmpShiftVecX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.tmpShiftVecY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.tmpShiftVecX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.tmpShiftVecY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), AdjustInternalNodesLoc(maxMemNodePerCell)); } // void SceCells::growAtRandom_M(double dt) { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // cout << "dt inside growAtRandom_M is: "<< dt << endl ; // randomizeGrowth_M(); // updateGrowthProgress_M(); // decideIsScheduleToGrow_M(); // //computeCellTargetLength_M(); // //computeDistToCellCenter_M(); // //findMinAndMaxDistToCenter_M(); // //computeLenDiffExpCur_M(); // //stretchCellGivenLenDiff_M(); // addPointIfScheduledToGrow_M(); // //decideIsScheduleToShrink_M();// AAMIRI May5 // //delPointIfScheduledToGrow_M();//AAMIRI - commented out on June20 // int currentActiveCellCount = allocPara_m.currentActiveCellCount ; // thrust::device_vector<double>::iterator minCellProgress_Itr=thrust::min_element(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgress.begin()+ currentActiveCellCount) ; // double minCell_Progress= *minCellProgress_Itr ; // if (minCell_Progress > 0 ) { // to not intefer with initialization with negative progress and no cell should divide before every one is positive. 
//		adjustGrowthInfo_M();
//
//	}
// }

// void SceCells::growAtRandom_M(double dt) {
//	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
//			* allocPara_m.maxAllNodePerCell;
//	// cout << "dt inside growAtRandom_M is: "<< dt << endl ;
//	// randomizeGrowth_M();
//	updateGrowthProgress_M();
//	// decideIsScheduleToGrow_M();
//	// int currentActiveCellCount = allocPara_m.currentActiveCellCount ;
//	// thrust::device_vector<double>::iterator minCellProgress_Itr=thrust::min_element(cellInfoVecs.growthProgress.begin(),
//	//		cellInfoVecs.growthProgress.begin()+ currentActiveCellCount) ;
//	// double minCell_Progress= *minCellProgress_Itr ;
//	// if (minCell_Progress > 0 ) { // to not interfere with initialization with negative progress; no cell should divide before every cell's progress is positive.
//	//	adjustGrowthInfo_M();
//	//
//	// }
// }

void SceCells::growAtRandom_M(double growthProgressSpeed) {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	updateGrowthProgress_M(growthProgressSpeed);
}

void SceCells::growAtRandom_M_Ver2(double growthProgressSpeed,
		double mitoticThreshold) {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	// cellRankFront = thrust::raw_pointer_cast(&(nodes->getInfoVecs().nodeCellRankFront[0]));
	// cellRankBehind = thrust::raw_pointer_cast(&(nodes->getInfoVecs().nodeCellRankBehind[0]));
	updateGrowthProgress_M_Ver2(growthProgressSpeed, mitoticThreshold);
}

//Ali
void SceCells::enterMitoticCheckForDivAxisCal(double mitoticThreshold) {
	bool isEnteringMitotic = decideIfAnyCellEnteringMitotic(mitoticThreshold); //A&A
	//A&A
	// if (isEnteringMitotic){
	//	std::cout<< "I am in EnteringMitotic"<< std::endl;
	//	copyCellsEnterMitotic();
	//	// findHertwigAxis();
	//	findHertwigAxis_useBasalApicalLoc();
	// }
}

void SceCells::divide2D_M(double volume_Increase_Target_Ratio, double timeRatio,
		double thresholdToIntroduceNewCell) {
	bool isDivisionPresent = decideIfGoingToDivide_M(volume_Increase_Target_Ratio);
	// std::cout<<"isDivisionPresent = "<<isDivisionPresent<<", number of cells undergoing division = "<<divAuxData.toBeDivideCount<<std::endl;
	// std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl;
	// std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl;
	// std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl;
	if (isDivisionPresent == false) {
		// std::cout<<"No cell division is present!"<<std::endl;
		return;
	}
	uint seed = time(NULL);
	thrust::default_random_engine rng(seed);
	rng.discard(allocPara_m.currentActiveCellCount);
	thrust::uniform_real_distribution<double> distribution(0.0, 1.0);
	thrust::uniform_real_distribution<double> distribution_half(0.0, 0.5);
	// thrust::uniform_real_distribution<double> distribution_half(0.0, 0.25);
	// if (timeRatio
>= 0.5){ // thrust::uniform_real_distribution<double> distribution(2.0, 4.0); // } double quiescence1, quiescence1_half; double quiescence2, quiescence2_half; double isNewCellIntroduced; // double thresholdToIntroduceNewCell = 0.3;//0.15; std::cout<<"Chance of new cell introduction = "<<thresholdToIntroduceNewCell<<std::endl; for (int i = 0; i < cellInfoVecs.isDividing.size(); i++){ if (cellInfoVecs.isDividing[i] == true){ if (cellInfoVecs.cellAreaVec[i] < (65*volume_Increase_Target_Ratio*0.9)){ std::cout<<"Cell Division occurs without reaching at least 90% of the target cell volume for division! Need to readjust parameters."<<std::endl; } } } isNewCellIntroduced = distribution(rng);//(distribution(rng) - 1.0)/2.0; // if (1 > 0){ if (isNewCellIntroduced < thresholdToIntroduceNewCell){ quiescence1 = -1.0*distribution(rng); // quiescence1_half = -1.0*distribution_half(rng); quiescence1_half = 1.0*distribution_half(rng); quiescence2 = -1.0*distribution(rng); // quiescence2_half = -1.0*distribution_half(rng); quiescence2_half = 1.0*distribution_half(rng); // std::cout<<"cellArea[10] = "<<cellInfoVecs.cellAreaVec[10]<<std::endl; // std::cout<<"cellArea[19] = "<<cellInfoVecs.cellAreaVec[19]<<std::endl; // std::cout<<"cellArea[28] = "<<cellInfoVecs.cellAreaVec[28]<<std::endl; copyCellsEnterDivision(); // std::cout<<"ERROR HERE 0?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; findHertwigAxis_useBasalApicalLoc(); //aniDebug = true; // std::cout<<"ERROR HERE 1?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; copyCellsPreDivision_M(); // std::cout<<"ERROR HERE 2?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = 
"<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; createTwoNewCellArr_M(); // main function which plays with position of internal nodes and membrane new created nodes. // std::cout<<"ERROR HERE 3?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; copyFirstCellArr_M(quiescence1, quiescence1_half); // copy the first cell information to GPU level and initilize values such as cell prgoress and cell rank .. // std::cout<<"ERROR HERE 4?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; copySecondCellArr_M(quiescence2, quiescence2_half);// copy the second cell information to GPU level and initilize values such as cell prgoress and cell rank .. 
// std::cout<<"ERROR HERE 5?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; updateActiveCellCount_M(); // std::cout<<"ERROR HERE 6?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; markIsDivideFalse_M(); // std::cout<<"ERROR HERE 7?"<<std::endl; //divDebug(); //Ali nodes->isMemNodeTypeAssigned_postCellDivision = false; cellInfoVecs.isPostDivision = true; // for (int k = 0; k < nodes->getInfoVecs().nodeCellRankFront.size(); k++){ uint cellRank_print = 0; for (int k = 0; k < allocPara_m.currentActiveCellCount; k++){ if (k == 0){ std::cout<<"Printing out order of cells in the tissue post growth"<<std::endl; std::cout<<k<<" "<<nodes->getInfoVecs().nodeCellRankFront[cellRank_print]<<" "; } // if (k == allocPara_m.currentActiveCellCount){ // std::cout<<nodes->getInfoVecs().nodeCellRankFront[k]<<" "<<std::endl;; // } else{ std::cout<<nodes->getInfoVecs().nodeCellRankFront[nodes->getInfoVecs().nodeCellRankFront[cellRank_print]]<<" "; } cellRank_print = nodes->getInfoVecs().nodeCellRankFront[cellRank_print]; } // for (int k = 0; k < nodes->getInfoVecs().nodeCellRankFront.size(); k++){ for (int k = 0; k < allocPara_m.currentActiveCellCount; k++){ if (k == 0){ std::cout<<"Printing out associated multip of cells in the tissue post growth"<<std::endl; std::cout<<nodes->getInfoVecs().contractActomyo_multip[k*allocPara_m.maxAllNodePerCell]<<" "<<nodes->getInfoVecs().contractActomyo_multip[nodes->getInfoVecs().nodeCellRankFront[k]*allocPara_m.maxAllNodePerCell]<<" "; } // if (k == allocPara_m.currentActiveCellCount){ // std::cout<<nodes->getInfoVecs().nodeCellRankFront[k]<<" "<<std::endl;; // } else{ // std::cout<<nodes->getInfoVecs().nodeCellRankFront[nodes->getInfoVecs().nodeCellRankFront[k-1]]<<" "; std::cout<< 
					nodes->getInfoVecs().contractActomyo_multip[nodes->getInfoVecs().nodeCellRankFront[k-1]*allocPara_m.maxAllNodePerCell]<<" ";
			}
		}
		for (int k = 0; k < allocPara_m.currentActiveCellCount; k++){
			if (k == 0){
				std::cout<<"Printing out number of daughter cells produced"<<std::endl;
			}
			if (k == allocPara_m.currentActiveCellCount-1){
				std::cout<<"["<<k<<", "<<cellInfoVecs.daughterCellProduced[k]<<"] "<<std::endl;
			}
			else{
				std::cout<<"["<<k<<", "<<cellInfoVecs.daughterCellProduced[k]<<"] ";
			}
		}
	}
	else{
		double quiescence3 = -1.0*distribution(rng);
		// double quiescence3_half = -1.0*distribution_half(rng);
		double quiescence3_half = 1.0*distribution_half(rng);
		copyCellsEnterDivision();
		uint cellRank = divAuxData.tmpCellRank_M[0];
		double leftOrRight = cellInfoVecs.centerCoordX[31]*cellInfoVecs.centerCoordY[cellRank]
				- cellInfoVecs.centerCoordY[31]*cellInfoVecs.centerCoordX[cellRank];
		if (leftOrRight >= 0){
			cellInfoVecs.growthProgress[cellRank] = quiescence3_half;//quiescence3*0.5;
			std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl;
		}
		else{
			cellInfoVecs.growthProgress[cellRank] = quiescence3;
			std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl;
		}
		for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){
			nodes->getInfoVecs().quiescencePerNode[i] = cellInfoVecs.growthProgress[cellRank];//quiescence3;
		}
		// cellInfoVecs.growthProgress[cellRank] = quiescence3;
		cellInfoVecs.cellAreaGrowthProgress[cellRank] = 0;
		cellInfoVecs.cellAreaGrowthProgressNonMitotic[cellRank] = 0;
		cellInfoVecs.isEnteringMitotic[cellRank] = false;
		cellInfoVecs.isDividing[cellRank] = false;
		cellInfoVecs.distFromBasalLoc[cellRank] = 0.0;
		cellInfoVecs.distFromApicalLoc[cellRank] = 0.0;
		cellInfoVecs.daughterCellProduced[cellRank] += 1;
		for (int k = 0; k < allocPara_m.currentActiveCellCount; k++){
			if (k == 0){
				std::cout<<"Printing out number of daughter cells produced"<<std::endl;
			}
			if (k == allocPara_m.currentActiveCellCount-1){
				std::cout<<"["<<k<<", "<<cellInfoVecs.daughterCellProduced[k]<<"] "<<std::endl;
			}
			else{
				std::cout<<"["<<k<<", "<<cellInfoVecs.daughterCellProduced[k]<<"] ";
			}
		}
		int startIndex = cellRank*allocPara_m.maxAllNodePerCell + allocPara_m.maxMembrNodePerCell;
		int numOfNucleus = cellInfoVecs.activeIntnlNodeCounts[cellRank];
		std::cout<<"Current number of nucleus labeled as 'active' = "<<numOfNucleus<<", before attempting to reduce the number due to out-of-plane cell division"<<std::endl;
		// if (cellInfoVecs.activeIntnlNodeCounts[cellRank] < 48){
		//	std::cout<<"Reducing nucleus size in out-of-plane cell division. But the current nucleus count is less than the default value (default = 24) !
SOMETHING IS WRONG!"<<std::endl; // } double activeNucNodeX = 0.0, activeNucNodeY = 0.0; cellInfoVecs.activeIntnlNodeCounts[cellRank] = 0; for (int i = 0; i < numOfNucleus; i++){ if (i < 24){ nodes->getInfoVecs().nodeIsActive[i + startIndex] = true; activeNucNodeX += nodes->getInfoVecs().nodeLocX[i + startIndex]; activeNucNodeY += nodes->getInfoVecs().nodeLocY[i + startIndex]; // std::cout<<"Saved nuc["<<i<<"] : "<<nodes->getInfoVecs().nodeLocX[i + startIndex]<<", "<<nodes->getInfoVecs().nodeLocY[i + startIndex]<<std::endl; cellInfoVecs.activeIntnlNodeCounts[cellRank] += 1; } else{ nodes->getInfoVecs().nodeIsActive[i + startIndex] = false; nodes->getInfoVecs().nodeLocX[i + startIndex] = 0.0; nodes->getInfoVecs().nodeLocY[i + startIndex] = 0.0; } } activeNucNodeX = activeNucNodeX/24.0; activeNucNodeY = activeNucNodeY/24.0; // std::cout<<"activeNucNodeX, Y: "<<activeNucNodeX<<", "<<activeNucNodeY<<std::endl; for (int i = 0; i < 24; i++){ if (nodes->getInfoVecs().nodeIsActive[i + startIndex] == true){ // std::cout<<"To be moved nuc["<<i<<"] : "<<nodes->getInfoVecs().nodeLocX[i + startIndex]<<", "<<nodes->getInfoVecs().nodeLocY[i + startIndex]<<std::endl; // std::cout<<"shifted by X, Y ["<<i<<"]: "<<(activeNucNodeX - nodes->getInfoVecs().nodeLocX[i + startIndex])*0.9<<", "<<(activeNucNodeY - nodes->getInfoVecs().nodeLocY[i + startIndex])*0.9<<std::endl; double distFromNucToNucCenter = sqrt((-activeNucNodeX + nodes->getInfoVecs().nodeLocX[i + startIndex])*(-activeNucNodeX + nodes->getInfoVecs().nodeLocX[i + startIndex]) + (-activeNucNodeY + nodes->getInfoVecs().nodeLocY[i + startIndex])*(-activeNucNodeY + nodes->getInfoVecs().nodeLocY[i + startIndex])); nodes->getInfoVecs().nodeLocX[i + startIndex] = (-activeNucNodeX + nodes->getInfoVecs().nodeLocX[i + startIndex])/distFromNucToNucCenter + activeNucNodeX;//nodes->getInfoVecs().nodeLocX[i + startIndex]; nodes->getInfoVecs().nodeLocY[i + startIndex] = (-activeNucNodeY + nodes->getInfoVecs().nodeLocY[i + startIndex])/distFromNucToNucCenter + activeNucNodeY;//nodes->getInfoVecs().nodeLocY[i + startIndex]; // std::cout<<"Resulting nuc["<<i<<"] : "<<nodes->getInfoVecs().nodeLocX[i + startIndex]<<", "<<nodes->getInfoVecs().nodeLocY[i + startIndex]<<std::endl; } } // std::cout<<"Current number of nucleus labeled as 'active' = "<<numOfNucleus<<", after reducing the number for out-of-plane cell division"<<std::endl; } } void SceCells::eCMCellInteraction(bool cellPolar,bool subCellPolar, bool isInitPhase, double timeRatio, double timeRatio_Crit_ECM, double timeRatio_Crit_Division, int relaxCount, double mitoticThreshold) { int totalNodeCountForActiveCellsECM = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; // eCMPointerCells->ApplyECMConstrain(allocPara_m.currentActiveCellCount,totalNodeCountForActiveCellsECM,curTime,dt,Damp_Coef,cellPolar,subCellPolar,isInitPhase);//, timeRatio, timeRatio_Crit_ECM, timeRatio_Crit_Division, relaxCount); eCMPointerCells->ApplyECMConstrain(allocPara_m.currentActiveCellCount,totalNodeCountForActiveCellsECM,curTime,dt,Damp_Coef,cellPolar,subCellPolar,isInitPhase, mitoticThreshold); } void SceCells::distributeCellGrowthProgress_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, 
DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara_m.bdryNodeCount); if (curTime <= InitTimeStage+dt)//AAMIRI /A & A thrust::copy( cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.end(), cellInfoVecs.lastCheckPoint.begin() ); } void SceCells::allComponentsMove_M() { //moveNodes_M(); //Ali moveNodes_BC_M(); //Ali } void SceCells::randomizeGrowth_M() { thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; double minY_Tisu= *MinY_Itr ; double maxY_Tisu= *MaxY_Itr ; uint seed = time(NULL); thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.eCellTypeV2.begin(), cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.eCellTypeV2.begin(), cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin())), RandomizeGrow_M(minY_Tisu,maxY_Tisu,growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, seed)); for (int i=0 ; i<1 ; i++) { cout << "cell growth speed for rank " <<i << " is " << cellInfoVecs.growthSpeed [i] << endl ; } cout << "the min growth speed is " << growthAuxData.randomGrowthSpeedMin << endl ; cout << "the max growth speed is " << growthAuxData.randomGrowthSpeedMax << endl ; } // void SceCells::updateGrowthProgress_M() { // thrust::counting_iterator<uint> iBegin(0); // thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); // thrust::copy(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgress.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.growthProgressOld.begin()); // // thrust::transform(cellInfoVecs.growthSpeed.begin(), // // cellInfoVecs.growthSpeed.begin() // // + allocPara_m.currentActiveCellCount, // // cellInfoVecs.growthProgress.begin(), // // cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthSpeed.begin(), // iBegin)), // thrust::make_zip_iterator( // thrust::make_tuple( // cellInfoVecs.growthProgress.begin()+ allocPara_m.currentActiveCellCount, // cellInfoVecs.growthSpeed.begin() + allocPara_m.currentActiveCellCount, // iEnd)), // cellInfoVecs.growthProgress.begin(), // progress_BCImp(dt)); // } void SceCells::updateGrowthProgress_M(double growthProgressSpeed) { thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); int* 
daughterCellProduced = thrust::raw_pointer_cast( &(cellInfoVecs.daughterCellProduced[0])); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.growthProgressOld.begin()); // thrust::transform(cellInfoVecs.growthSpeed.begin(), // cellInfoVecs.growthSpeed.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthSpeed.begin(), iBegin)), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.growthProgress.begin()+ allocPara_m.currentActiveCellCount, cellInfoVecs.growthSpeed.begin() + allocPara_m.currentActiveCellCount, iEnd)), cellInfoVecs.growthProgress.begin(), progress_BCImp(growthProgressSpeed, daughterCellProduced)); } void SceCells::updateGrowthProgress_M_Ver2(double growthProgressSpeed, double mitoticThreshold) { thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); int* cellRankFront = thrust::raw_pointer_cast(&(nodes->getInfoVecs().nodeCellRankFront[0])); int* cellRankBehind = thrust::raw_pointer_cast(&(nodes->getInfoVecs().nodeCellRankBehind[0])); double* currentGrowthProgress = thrust::raw_pointer_cast(&cellInfoVecs.growthProgress[0]); int* daughterCellProduced = thrust::raw_pointer_cast( &(cellInfoVecs.daughterCellProduced[0])); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.growthProgressOld.begin()); // thrust::transform(cellInfoVecs.growthSpeed.begin(), // cellInfoVecs.growthSpeed.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthSpeed.begin(), iBegin)), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.growthProgress.begin()+ allocPara_m.currentActiveCellCount, cellInfoVecs.growthSpeed.begin() + allocPara_m.currentActiveCellCount, iEnd)), cellInfoVecs.growthProgress.begin(), progress_BCImp_Ver2(growthProgressSpeed, daughterCellProduced, cellRankFront, cellRankBehind, currentGrowthProgress, mitoticThreshold)); } void SceCells::decideIsScheduleToGrow_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } //AAMIRI May5 void SceCells::decideIsScheduleToShrink_M() { double laserCenterX = 26.0; double laserCenterY = 25.0; double laserRadius = 4.0; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isScheduledToShrink.begin())), thrust::make_zip_iterator( thrust::make_tuple(iEnd, cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount, 
cellInfoVecs.isScheduledToShrink.begin()+allocPara_m.currentActiveCellCount)), cellInfoVecs.isScheduledToShrink.begin(), isDelOp(laserCenterX, laserCenterY, laserRadius)); } void SceCells::computeCellTargetLength_M() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } void SceCells::computeDistToCellCenter_M() { thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(totalNodeCountForActiveCells); uint endIndx = allocPara_m.bdryNodeCount + totalNodeCountForActiveCells; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeLocX.begin() + endIndx, nodes->getInfoVecs().nodeLocY.begin() + endIndx, nodes->getInfoVecs().nodeIsActive.begin() + endIndx)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } void SceCells::findMinAndMaxDistToCenter_M() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. 
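	// The per-cell extrema use the same keyed reduction as above with a different binary
	// operator: thrust::minimum<double>() for the smallest distance, thrust::maximum<double>()
	// for the largest (the call just below). Minimal sketch with a hypothetical per-node
	// distance vector distPerNode and per-cell output maxDistPerCell:
	//
	//	thrust::reduce_by_key(
	//			make_transform_iterator(countingBegin,
	//					DivideFunctor(allocPara_m.maxAllNodePerCell)),
	//			make_transform_iterator(countingBegin,
	//					DivideFunctor(allocPara_m.maxAllNodePerCell))
	//					+ totalNodeCountForActiveCells,
	//			distPerNode.begin(),                       // per-node values
	//			cellInfoVecs.cellRanksTmpStorage.begin(),  // one key out per cell
	//			maxDistPerCell.begin(),                    // one maximum per cell
	//			thrust::equal_to<uint>(), thrust::maximum<double>());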
thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } void SceCells::computeLenDiffExpCur_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } void SceCells::stretchCellGivenLenDiff_M() { uint count = allocPara_m.maxAllNodePerCell; uint bdry = allocPara_m.bdryNodeCount; uint actCount = totalNodeCountForActiveCells; uint all = bdry + actCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(actCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), nodes->getInfoVecs().nodeVelX.begin() + bdry, nodes->getInfoVecs().nodeVelY.begin() + bdry, make_transform_iterator(iBegin, ModuloFunctor(count)))), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin() + actCount, make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), nodes->getInfoVecs().nodeVelX.begin() + all, nodes->getInfoVecs().nodeVelY.begin() + all, make_transform_iterator(iEnd, ModuloFunctor(count)))), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + bdry, nodes->getInfoVecs().nodeVelY.begin() + bdry)), ApplyStretchForce_M(bioPara.elongationCoefficient, allocPara_m.maxMembrNodePerCell)); } void SceCells::addPointIfScheduledToGrow_M() { uint seed = time(NULL); uint activeCellCount = allocPara_m.currentActiveCellCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(activeCellCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin(), iBegin, cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.isScheduledToGrow.begin() + activeCellCount, cellInfoVecs.activeIntnlNodeCounts.begin() + activeCellCount, cellInfoVecs.InternalAvgX.begin() + activeCellCount, cellInfoVecs.InternalAvgY.begin() + activeCellCount, iEnd, cellInfoVecs.lastCheckPoint.begin() + activeCellCount)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lastCheckPoint.begin(), 
cellInfoVecs.activeIntnlNodeCounts.begin())), AddPtOp_M(seed, miscPara.addNodeDistance, miscPara.growThreshold, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.nodeIsActiveAddress)); } //AAMIRI void SceCells::delPointIfScheduledToGrow_M() { uint seed = time(NULL); uint activeCellCount = allocPara_m.currentActiveCellCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(activeCellCount); int timeStep = curTime/dt; if (curTime>70000.0 && curTime<70000.1){ decideIsScheduleToShrink_M();// AAMIRI } if (curTime > 70000.0) thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToShrink.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), iBegin, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.isCellActive.begin(), cellInfoVecs.growthSpeed.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.isScheduledToShrink.begin() + activeCellCount, cellInfoVecs.activeIntnlNodeCounts.begin() + activeCellCount, cellInfoVecs.centerCoordX.begin() + activeCellCount, cellInfoVecs.centerCoordY.begin() + activeCellCount, iEnd, cellInfoVecs.activeMembrNodeCounts.begin() + activeCellCount, cellInfoVecs.isCellActive.begin() + activeCellCount, cellInfoVecs.growthSpeed.begin() + activeCellCount)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.isCellActive.begin(), cellInfoVecs.growthSpeed.begin())), DelPtOp_M(seed, timeStep, growthAuxData.adhIndxAddr, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.nodeIsActiveAddress)); } bool SceCells::decideIfGoingToDivide_M(double volume_Increase_Target_Ratio) { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.cellAreaVec.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.cellAreaVec.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), CompuIsDivide_M(65.0*volume_Increase_Target_Ratio*0.9)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 1){ int howManyCellsAreDividing = 0; for (int i = 0; i < cellInfoVecs.isDividing.size(); i++){ if (cellInfoVecs.isDividing[i] == true){ howManyCellsAreDividing += 1; if (howManyCellsAreDividing > 1){ cellInfoVecs.isDividing[i] = false; } } } divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); std::cout<<"More than one cell is undergoing division. 
The number is trimmed down to "<<divAuxData.toBeDivideCount<<" to avoid model instability!"<<std::endl; } if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } //A&A // bool SceCells::decideIfAnyCellEnteringMitotic() { // // double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // // - growthAuxData.prolifDecay // // * (growthAuxData.grthProgrEndCPU // // - growthAuxData.grthPrgrCriVal_M_Ori); // double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgressOld.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgressOld.begin())) // + allocPara_m.currentActiveCellCount, // cellInfoVecs.isEnteringMitotic.begin(), // CompuIsEnteringMitotic_M(grthPrgrCriVal_M)); // //CompuIsEnteringMitotic_M(0.98)); // Ali for cross section modeling // // sum all bool values which indicate whether the cell is going to divide. // // toBeDivideCount is the total number of cells going to divide. // // divAuxData.toEnterMitoticCount = thrust::reduce(cellInfoVecs.isEnteringMitotic.begin(), // // cellInfoVecs.isEnteringMitotic.begin() // // + allocPara_m.currentActiveCellCount, (uint) (0)); // // if (cycle == 0){ // std::fill(cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.isEnteringMitotic.end(),false); // cellInfoVecs.isEnteringMitotic[divAuxData.cellRank_division] = true; // // } // // else if (cycle == 1){ // // std::fill(cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.isEnteringMitotic.end(),false); // // cellInfoVecs.isEnteringMitotic[divAuxData.cellRank_division] = true; // // } // divAuxData.toEnterMitoticCount = 1; // if (divAuxData.toEnterMitoticCount > 0) { // return true; // } else { // return false; // } // } bool SceCells::decideIfAnyCellEnteringMitotic(double grthPrgrCriVal_M) { // double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); // double grthPrgrCriVal_M = 0.8973;//divAuxData.targetCellDividingArea; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgressOld.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgressOld.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isEnteringMitotic.begin(), CompuIsEnteringMitotic_M(grthPrgrCriVal_M)); //CompuIsEnteringMitotic_M(0.98)); // Ali for cross section modeling // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. 
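	// Counting the flagged cells below is a plain thrust::reduce over the boolean flags;
	// the (uint) 0 initial value makes the bools accumulate as integers. Minimal sketch
	// with a hypothetical flag vector isFlagged:
	//
	//	uint flaggedCount = thrust::reduce(isFlagged.begin(),
	//			isFlagged.begin() + allocPara_m.currentActiveCellCount, (uint) 0);
	//	bool anyFlagged = (flaggedCount > 0);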
divAuxData.toEnterMitoticCount = thrust::reduce(cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.isEnteringMitotic.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toEnterMitoticCount > 0) { return true; } else { return false; } } //AAMIRI /* bool SceCells::decideIfGoingToRemove_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isRemoving.begin(), CompuIsRemoving_M(allocPara_m.maxIntnlNodePerCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toBeRemovingCount = thrust::reduce(cellInfoVecs.isRemoving.begin(), cellInfoVecs.isRemoving.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeRemovingCount > 0) { return true; } else { return false; } } */ AniRawData SceCells::obtainAniRawData(AnimationCriteria& aniCri) { uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; AniRawData rawAniData; //cout << "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin())) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin()))); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; CVector tmpPos; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double aniVal; for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; for (uint i = 0; i < activeCellCount; 
i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = hostTmpVectorTenMag[index1]; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = hostTmpVectorTenMag[index2]; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } } for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + maxMemNodePerCell + k; if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = -1; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = -1; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.internalLinks.push_back(linkData); } } } } } return rawAniData; } AniRawData SceCells::obtainAniRawDataGivenCellColor(vector<double>& cellColors, AnimationCriteria& aniCri, vector<double>& cellsPerimeter) { //AliE uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; assert(cellColors.size() >= activeCellCount); assert(cellsPerimeter.size() == activeCellCount); //AliE AniRawData rawAniData; //cout 
<< "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::host_vector<double> hostTmpVectorF_MI_M_x(maxActiveNode);//AAMIRI //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_y(maxActiveNode);//AAMIRI //AliE //thrust::host_vector<double> hostTmpVectorF_MI_M_T(maxActiveNode); //AliE //thrust::host_vector<double> hostTmpVectorF_MI_M_N(maxActiveNode);//AliE thrust::host_vector<double> hostTmpVectorNodeCurvature(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorNodeActinLevel(maxActiveNode);//Ali thrust::host_vector<double> hostTmpVectorInterCellForceTangent(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorInterCellForceNormal(maxActiveNode);//AAMIRI thrust::host_vector<int> hostTmpContractPair(maxActiveNode); // thrust::host_vector<double> hostTmpNodeVelX(maxActiveNode); // thrust::host_vector<double> hostTmpNodeVelY(maxActiveNode); thrust::host_vector<double> hostTmpNodeContrApi(maxActiveNode); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeInterCellForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeInterCellForceNormal.begin())),//AAMIRI thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeInterCellForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeInterCellForceNormal.begin()))//AAMIRI + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostTmpVectorF_MI_M_x.begin(), hostTmpVectorF_MI_M_y.begin(),//AAMIRI hostTmpVectorNodeCurvature.begin(), //AAMIRI hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin(), hostTmpVectorInterCellForceTangent.begin(), hostTmpVectorInterCellForceNormal.begin())));//AAMIRI //Copy more than 10 elements is not allowed so, I separate it /* thrust::copy( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(), //Ali nodes->getInfoVecs().nodeF_MI_M_N.begin(), //Ali nodes->getInfoVecs().nodeActinLevel.begin() //Ali )), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(),//AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeActinLevel.begin() //Ali )) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple( 
hostTmpVectorF_MI_M_T.begin(), hostTmpVectorF_MI_M_N.begin(),hostTmpVectorNodeActinLevel.begin() ))); */ thrust::copy(nodes->getInfoVecs().nodeActinLevel.begin(),nodes->getInfoVecs().nodeActinLevel.begin()+ maxActiveNode,hostTmpVectorNodeActinLevel.begin()); //Ali thrust::copy(nodes->getInfoVecs().basalContractPair.begin() ,nodes->getInfoVecs().basalContractPair.begin() + maxActiveNode,hostTmpContractPair.begin()); //Ali // thrust::copy(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelX.begin() + maxActiveNode, hostTmpNodeVelX.begin()); // thrust::copy(nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeVelY.begin() + maxActiveNode, hostTmpNodeVelY.begin()); thrust::copy(nodes->getInfoVecs().contractActomyo_multip_apical.begin(), nodes->getInfoVecs().contractActomyo_multip_apical.begin() + maxActiveNode, hostTmpNodeContrApi.begin()); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; thrust::host_vector<uint> curActiveIntnlNodeCounts = cellInfoVecs.activeIntnlNodeCounts; CVector tmpPos; CVector tmpF_MI_M ;//AAmiri CVector tmpInterCellForce;//AAMIRI double tmpCurv; double tmpMembTen ; double tmpActinLevel ; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double node1F_MI_M_x, node1F_MI_M_y;//AAMIRI //AliE double nodeInterCellForceT, nodeInterCellForceN;//AAMIRI double aniVal; double tmpNodeVel_Mag; double tmpNodeContrApi; //double tmpF_MI_M_MagN_Int[activeCellCount-1] ; //AliE //This is how the VTK file is intended to be written. First the memmbraen nodes are going to be written and then internal nodes. //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { //tmpF_MI_M_MagN_Int[i]=0.0 ; for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true) { tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI tmpMembTen = hostTmpVectorTenMag[index1];//Ali rawAniData.aniNodeMembTension.push_back(tmpMembTen);//Ali tmpActinLevel = hostTmpVectorNodeActinLevel[index1];//Ali rawAniData.aniNodeActinLevel.push_back(tmpActinLevel);//Ali node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); //AliE // tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+sqrt(pow(hostTmpVectorF_MI_M_x[index1],2)+pow(hostTmpVectorF_MI_M_y[index1],2)) ; //AliE //tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+hostTmpVectorF_MI_M_N[index1] ; //AliE nodeInterCellForceT = hostTmpVectorInterCellForceTangent[index1];//AAMIRI nodeInterCellForceN = hostTmpVectorInterCellForceNormal[index1];//AAMIRI tmpInterCellForce = CVector(nodeInterCellForceT, nodeInterCellForceN, 0.0);//AAMIRI rawAniData.aniNodeInterCellForceArr.push_back(tmpInterCellForce); // tmpNodeVel_Mag = sqrt(hostTmpNodeVelX[index1]*hostTmpNodeVelX[index1] + hostTmpNodeVelY[index1]*hostTmpNodeVelY[index1]); // rawAniData.aniNodeVel_Mag.push_back(tmpNodeVel_Mag); tmpNodeContrApi = hostTmpNodeContrApi[index1]; rawAniData.aniNodeContrApi.push_back(tmpNodeContrApi); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } //loop on internal nodes for (uint i=0; i<activeCellCount; i++){ for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true ) { tmpCurv = 
hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI tmpMembTen = hostTmpVectorTenMag[index1];//Ali rawAniData.aniNodeMembTension.push_back(tmpMembTen);//Ali tmpActinLevel = hostTmpVectorNodeActinLevel[index1];//Ali rawAniData.aniNodeActinLevel.push_back(tmpActinLevel);//Ali node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); nodeInterCellForceT = hostTmpVectorInterCellForceTangent[index1];//AAMIRI nodeInterCellForceN = hostTmpVectorInterCellForceNormal[index1];//AAMIRI tmpInterCellForce = CVector(nodeInterCellForceT, nodeInterCellForceN, 0.0);//AAMIRI // tmpNodeVel_Mag = sqrt(hostTmpNodeVelX[index1]*hostTmpNodeVelX[index1] + hostTmpNodeVelY[index1]*hostTmpNodeVelY[index1]); // rawAniData.aniNodeVel_Mag.push_back(tmpNodeVel_Mag); tmpNodeContrApi = hostTmpNodeContrApi[index1]; rawAniData.aniNodeContrApi.push_back(tmpNodeContrApi); rawAniData.aniNodeInterCellForceArr.push_back(tmpInterCellForce); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } // for adhesion pair for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; //rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali added rawAniData.aniNodeF_MI_M_MagN_Int.push_back(cellInfoVecs.cellPressure[i]) ; //Ali added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = cellColors[i]; //rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added rawAniData.aniNodeF_MI_M_MagN_Int.push_back(cellInfoVecs.cellPressure[i]) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; 
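				// Both endpoints are now registered in locIndexToAniIndexMap, so the link built
				// below is expressed in animation-array indices rather than raw node indices.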
LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; // rawAniData.memLinks.push_back(linkData); I don't want this type of membrane nodes links be shown. } } } // loop for links for basal contraction. Since the links are between membrane nodes, no new map needs to be created. for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; index2 = hostTmpContractPair[index1]; if (index2 == -1) { continue; } IndexMap::iterator it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } //loop on internal nodes for (uint i = 0; i < activeCellCount; i++) { // for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = 0; k < allocPara_m.maxAllNodePerCell; k++) { //Ali //for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { //Ali comment index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + k; //Ali // index2 = i * maxNodePerCell + maxMemNodePerCell + k; //Ali comment // if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { if (hostIsActiveVec[index1] && hostIsActiveVec[index2]&& index1 !=index2 ) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; //rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added rawAniData.aniNodeF_MI_M_MagN_Int.push_back(cellInfoVecs.cellPressure[i]) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; //rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added rawAniData.aniNodeF_MI_M_MagN_Int.push_back(cellInfoVecs.cellPressure[i]) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; // rawAniData.internalLinks.push_back(linkData); I don't want internal node links be shown. 
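					// The internal-node link data is assembled but, like the plain membrane links
					// above, intentionally not pushed into rawAniData in this colored output
					// variant; only node positions, color values and per-cell pressure are kept.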
} } } } } return rawAniData; } vector<AniResumeData> SceCells::obtainResumeData() { //AliE //Copy from GPU to CPU node properties uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpNodeLocX(maxActiveNode); thrust::host_vector<double> hostTmpNodeLocY(maxActiveNode); thrust::host_vector<double> hostTmpDppLevel(maxActiveNode); thrust::host_vector<bool> hostTmpNodeIsActive(maxActiveNode); thrust::host_vector<MembraneType1> hostTmpMemNodeType(maxActiveNode); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().dppLevel.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().memNodeType1.begin())), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().dppLevel.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().memNodeType1.begin())) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpDppLevel.begin(), hostTmpNodeIsActive.begin(), hostTmpNodeLocX.begin(), hostTmpNodeLocY.begin(), hostTmpMemNodeType.begin()))); // Copy from GPU to CPU cell properties. Since cell vectors are small copy with thrust function seems unnecessary thrust::host_vector<uint> hostTmpActiveMemNodeCounts =cellInfoVecs.activeMembrNodeCounts; thrust::host_vector<ECellType> hostTmpCellType =cellInfoVecs.eCellTypeV2 ; thrust::host_vector<double>hostTmpCellCntrX =cellInfoVecs.centerCoordX ; thrust::host_vector<double>hostTmpCellCntrY =cellInfoVecs.centerCoordY ; // Write it nicely in CPU vectorial form that can be easily wirtten in an output file. 
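	// The resume data is returned as three blocks, pushed in this order:
	//   [0] membrane nodes : cell rank, membrane node type, dpp signal level, position
	//   [1] internal nodes : cell rank, position
	//   [2] per-cell info  : cell rank, cell type, cell center position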
vector <AniResumeData> aniResumeDatas ; AniResumeData membraneResumeData; AniResumeData internalResumeData; AniResumeData cellResumeData; CVector tmpPos; uint index1; //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < hostTmpActiveMemNodeCounts[i]; j++) { index1 = i * maxNodePerCell + j; if ( hostTmpNodeIsActive[index1]==true) { membraneResumeData.cellRank.push_back(i); // it is cell rank membraneResumeData.nodeType.push_back(hostTmpMemNodeType[index1]); membraneResumeData.signalLevel.push_back(hostTmpDppLevel[index1]); tmpPos=CVector(hostTmpNodeLocX[index1],hostTmpNodeLocY[index1],0) ; membraneResumeData.nodePosArr.push_back(tmpPos) ; } } } aniResumeDatas.push_back(membraneResumeData) ; //loop on internal nodes for (uint i=0; i<activeCellCount; i++){ for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) { index1 = i * maxNodePerCell + j; if ( hostTmpNodeIsActive[index1]==true ) { internalResumeData.cellRank.push_back(i); // it is cell rank tmpPos=CVector(hostTmpNodeLocX[index1],hostTmpNodeLocY[index1],0) ; internalResumeData.nodePosArr.push_back(tmpPos) ; } } } aniResumeDatas.push_back(internalResumeData) ; // loop for cells for (uint i=0; i<activeCellCount; i++){ cellResumeData.cellRank.push_back(i); cellResumeData.cellType.push_back(hostTmpCellType[i]); tmpPos=CVector(hostTmpCellCntrX[i],hostTmpCellCntrY[i],0) ; cellResumeData.nodePosArr.push_back(tmpPos) ; } aniResumeDatas.push_back(cellResumeData) ; return aniResumeDatas; } void SceCells::copyInitActiveNodeCount_M( std::vector<uint>& initMembrActiveNodeCounts, std::vector<uint>& initIntnlActiveNodeCounts, std::vector<double> &initGrowProgVec, std::vector<ECellType> &eCellTypeV1) { assert( initMembrActiveNodeCounts.size() == initIntnlActiveNodeCounts.size()); totalNodeCountForActiveCells = initMembrActiveNodeCounts.size() * allocPara_m.maxAllNodePerCell; thrust::copy(initMembrActiveNodeCounts.begin(), initMembrActiveNodeCounts.end(), cellInfoVecs.activeMembrNodeCounts.begin()); thrust::copy(initIntnlActiveNodeCounts.begin(), initIntnlActiveNodeCounts.end(), cellInfoVecs.activeIntnlNodeCounts.begin()); thrust::copy(initGrowProgVec.begin(), initGrowProgVec.end(), cellInfoVecs.growthProgress.begin()); thrust::copy(eCellTypeV1.begin(), eCellTypeV1.end(), cellInfoVecs.eCellTypeV2.begin()); // v2 might be bigger //for (int i=0 ; i<eCellTypeV1.size() ; i++ ) { // cout << "fourth check for cell type" << cellInfoVecs.eCellTypeV2[i] << endl ; // } } void SceCells::myDebugFunction() { uint maxActiveNodeCount = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxActiveCellCount = allocPara_m.currentActiveCellCount; std::cout << "totalNodeCountforActiveCells: " << totalNodeCountForActiveCells << std::endl; std::cout << "maxAllNodePerCell: " << allocPara_m.maxAllNodePerCell << std::endl; std::cout << "maxActiveCellCount: " << maxActiveCellCount << std::endl; std::cout << "bdryNodeCount: " << allocPara_m.bdryNodeCount << std::endl; std::cout << "grow threshold: " << miscPara.growThreshold << std::endl; std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthProgress[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.isScheduledToGrow[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lastCheckPoint[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeCount; i++) { if 
(nodes->getInfoVecs().nodeIsActive[i] && nodes->getInfoVecs().nodeCellType[i] == CellIntnl) { std::cout << nodes->getInfoVecs().nodeVelX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.activeIntnlNodeCounts[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.expectedLength[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.smallestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.biggestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lengthDifference[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthXDir[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthYDir[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } void SceCells::divDebug() { std::cout << "tmpIsActive_M: "; for (uint i = 0; i < divAuxData.tmpIsActive_M.size(); i++) { std::cout << divAuxData.tmpIsActive_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosX_M: "; for (uint i = 0; i < divAuxData.tmpNodePosX_M.size(); i++) { std::cout << divAuxData.tmpNodePosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosY_M : "; for (uint i = 0; i < divAuxData.tmpNodePosY_M.size(); i++) { std::cout << divAuxData.tmpNodePosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCellRank_M : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { std::cout << divAuxData.tmpCellRank_M[i] << " "; } std::cout << std::endl; std::cout << "tmpDivDirX_M : "; for (uint i = 0; i < divAuxData.tmpDivDirX_M.size(); i++) { std::cout << divAuxData.tmpDivDirX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpDivDirY_M : "; for (uint i = 0; i < divAuxData.tmpDivDirY_M.size(); i++) { std::cout << divAuxData.tmpDivDirY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosX_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosX_M.size(); i++) { std::cout << divAuxData.tmpCenterPosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosY_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosY_M.size(); i++) { std::cout << divAuxData.tmpCenterPosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive1_M : "; for (uint i = 0; i < divAuxData.tmpIsActive1_M.size(); i++) { std::cout << divAuxData.tmpIsActive1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos1_M : "; for (uint i = 0; i < divAuxData.tmpXPos1_M.size(); i++) { std::cout << divAuxData.tmpXPos1_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive1_M[i] && divAuxData.tmpIsActive1_M[i - 1] && fabs(divAuxData.tmpXPos1_M[i] - divAuxData.tmpXPos1_M[i - 1]) > 0.1) { std::cout << "11111111111111111111111, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "XPos1_onDevice : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { uint index = divAuxData.tmpCellRank_M[i] * allocPara_m.maxAllNodePerCell + 
j; std::cout << nodes->getInfoVecs().nodeLocX[index] << " "; } } std::cout << std::endl; std::cout << "tmpYPos1_M : "; for (uint i = 0; i < divAuxData.tmpYPos1_M.size(); i++) { std::cout << divAuxData.tmpYPos1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive2_M: "; for (uint i = 0; i < divAuxData.tmpIsActive2_M.size(); i++) { std::cout << divAuxData.tmpIsActive2_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos2_M : "; for (uint i = 0; i < divAuxData.tmpXPos2_M.size(); i++) { std::cout << divAuxData.tmpXPos2_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive2_M[i] && divAuxData.tmpIsActive2_M[i - 1] && fabs(divAuxData.tmpXPos2_M[i] - divAuxData.tmpXPos2_M[i - 1]) > 0.1) { std::cout << "2222222222222222222, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "tmpYPos2_M : "; for (uint i = 0; i < divAuxData.tmpYPos2_M.size(); i++) { std::cout << divAuxData.tmpYPos2_M[i] << " "; } std::cout << std::endl; std::cout << "tmp1InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp1InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp2InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp1MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp1MemActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp2MemActiveCounts[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } void SceCells::adjustGrowthInfo_M() { uint halfMax = allocPara_m.maxIntnlNodePerCell / 2; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), AdjustGrowth(halfMax), thrust::identity<bool>()); } VtkAnimationData SceCells::outputVtkData(AniRawData& rawAniData, AnimationCriteria& aniCri) { VtkAnimationData vtkData; for (uint i = 0; i < rawAniData.aniNodePosArr.size(); i++) { PointAniData ptAniData; ptAniData.pos = rawAniData.aniNodePosArr[i]; ptAniData.F_MI_M_MagN_Int= rawAniData.aniNodeF_MI_M_MagN_Int[i]; //AliE ptAniData.F_MI_M = rawAniData.aniNodeF_MI_M[i];//AAMIRI ptAniData.colorScale = rawAniData.aniNodeVal[i]; ptAniData.colorScale2 = rawAniData.aniNodeCurvature[i];//AAMIRI ptAniData.colorScale3 = rawAniData.aniNodeMembTension[i];//Ali //ptAniData.colorScale4 = rawAniData.aniNodeVel_Mag[i];//rawAniData.aniNodeActinLevel[i];//Ali ptAniData.colorScale4 = rawAniData.aniNodeContrApi[i]; ptAniData.rankScale = rawAniData.aniNodeRank[i];//AAMIRI ptAniData.intercellForce = rawAniData.aniNodeInterCellForceArr[i];//AAMIRI vtkData.pointsAniData.push_back(ptAniData); } for (uint i = 0; i < rawAniData.internalLinks.size(); i++) { LinkAniData linkData = rawAniData.internalLinks[i]; 
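		// Internal-node links are appended first; the membrane links copied in the next loop
		// go into the same linksAniData list, so the VTK writer sees one flat link array.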
vtkData.linksAniData.push_back(linkData); } for (uint i = 0; i < rawAniData.memLinks.size(); i++) { LinkAniData linkData = rawAniData.memLinks[i]; vtkData.linksAniData.push_back(linkData); } vtkData.isArrowIncluded = false; return vtkData; } void SceCells::copyToGPUConstMem() { double pI_CPU = acos(-1.0); double minLengthCPU = globalConfigVars.getConfigValue("MinLength").toDouble(); hipMemcpyToSymbol(minLength, &minLengthCPU, sizeof(double)); double minDivisorCPU = globalConfigVars.getConfigValue("MinDivisor").toDouble(); hipMemcpyToSymbol(minDivisor, &minDivisorCPU, sizeof(double)); hipMemcpyToSymbol(membrEquLen, &membrPara.membrEquLenCPU, sizeof(double)); hipMemcpyToSymbol(membrStiff, &membrPara.membrStiffCPU, sizeof(double)); hipMemcpyToSymbol(membrStiff_Mitotic, &membrPara.membrStiff_Mitotic, sizeof(double)); // Ali June 30 hipMemcpyToSymbol(kContractMemb, &membrPara.kContractMemb, sizeof(double)); hipMemcpyToSymbol(pI, &pI_CPU, sizeof(double)); hipMemcpyToSymbol(bendCoeff, &membrPara.membrBendCoeff, sizeof(double)); hipMemcpyToSymbol(bendCoeff_Mitotic, &membrPara.membrBendCoeff_Mitotic, sizeof(double));//AAMIRI hipMemcpyToSymbol(F_Ext_Incline_M2, &membrPara.F_Ext_Incline, sizeof(double)); //Ali uint maxAllNodePerCellCPU = globalConfigVars.getConfigValue( "MaxAllNodeCountPerCell").toInt(); uint maxMembrNodePerCellCPU = globalConfigVars.getConfigValue( "MaxMembrNodeCountPerCell").toInt(); uint maxIntnlNodePerCellCPU = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); hipMemcpyToSymbol(maxAllNodePerCell, &maxAllNodePerCellCPU, sizeof(uint)); hipMemcpyToSymbol(maxMembrPerCell, &maxMembrNodePerCellCPU, sizeof(uint)); hipMemcpyToSymbol(maxIntnlPerCell, &maxIntnlNodePerCellCPU, sizeof(uint)); double sceIntnlBParaCPU_M[5]; double sceIntraParaCPU_M[5]; double sceIntraParaDivCPU_M[5]; double U0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble(); double V0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble(); double k1_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble(); double k2_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble(); double intnlBEffectiveRange = globalConfigVars.getConfigValue( "IntnlBEffectRange").toDouble(); sceIntnlBParaCPU_M[0] = U0_IntnlB; sceIntnlBParaCPU_M[1] = V0_IntnlB; sceIntnlBParaCPU_M[2] = k1_IntnlB; sceIntnlBParaCPU_M[3] = k2_IntnlB; sceIntnlBParaCPU_M[4] = intnlBEffectiveRange; ////////////////////// //// Block 3 ///////// ////////////////////// double U0_Intra = globalConfigVars.getConfigValue("IntraCell_U0").toDouble(); double V0_Intra = globalConfigVars.getConfigValue("IntraCell_V0").toDouble(); double k1_Intra = globalConfigVars.getConfigValue("IntraCell_k1").toDouble(); double k2_Intra = globalConfigVars.getConfigValue("IntraCell_k2").toDouble(); double intraLinkEffectiveRange = globalConfigVars.getConfigValue( "IntraEffectRange").toDouble(); sceIntraParaCPU_M[0] = U0_Intra; sceIntraParaCPU_M[1] = V0_Intra; sceIntraParaCPU_M[2] = k1_Intra; sceIntraParaCPU_M[3] = k2_Intra; sceIntraParaCPU_M[4] = intraLinkEffectiveRange; ////////////////////// //// Block 4 ///////// ////////////////////// double U0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble(); double V0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble(); double k1_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble(); double k2_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble(); double intraDivEffectiveRange = 
globalConfigVars.getConfigValue( "IntraDivEffectRange").toDouble(); sceIntraParaDivCPU_M[0] = U0_Intra_Div; sceIntraParaDivCPU_M[1] = V0_Intra_Div; sceIntraParaDivCPU_M[2] = k1_Intra_Div; sceIntraParaDivCPU_M[3] = k2_Intra_Div; sceIntraParaDivCPU_M[4] = intraDivEffectiveRange; hipMemcpyToSymbol(grthPrgrCriEnd_M, &growthAuxData.grthProgrEndCPU, sizeof(double)); //hipMemcpyToSymbol(grthPrgrCriVal_M, &growthPrgrCriVal, sizeof(double)); hipMemcpyToSymbol(sceIB_M, sceIntnlBParaCPU_M, 5 * sizeof(double)); hipMemcpyToSymbol(sceII_M, sceIntraParaCPU_M, 5 * sizeof(double)); hipMemcpyToSymbol(sceIIDiv_M, sceIntraParaDivCPU_M, 5 * sizeof(double)); double IBDivHost[5]; IBDivHost[0] = globalConfigVars.getConfigValue("SceIntnlB_U0_Div").toDouble(); IBDivHost[1] = globalConfigVars.getConfigValue("SceIntnlB_V0_Div").toDouble(); IBDivHost[2] = globalConfigVars.getConfigValue("SceIntnlB_k1_Div").toDouble(); IBDivHost[3] = globalConfigVars.getConfigValue("SceIntnlB_k2_Div").toDouble(); IBDivHost[4] = globalConfigVars.getConfigValue("IntnlBDivEffectRange").toDouble(); hipMemcpyToSymbol(sceIBDiv_M, IBDivHost, 5 * sizeof(double)); ////////////////////// //// Block Nucleus ///////// ////////////////////// double sceNucleusParaCPU_M[5]; double U0_Nucleus = globalConfigVars.getConfigValue("NucleusCell_U0").toDouble(); double V0_Nucleus = globalConfigVars.getConfigValue("NucleusCell_V0").toDouble(); double k1_Nucleus = globalConfigVars.getConfigValue("NucleusCell_k1").toDouble(); double k2_Nucleus = globalConfigVars.getConfigValue("NucleusCell_k2").toDouble(); double nucleusLinkEffectiveRange = globalConfigVars.getConfigValue( "NucleusEffectRange").toDouble(); sceNucleusParaCPU_M[0] = U0_Nucleus; sceNucleusParaCPU_M[1] = V0_Nucleus; sceNucleusParaCPU_M[2] = k1_Nucleus; sceNucleusParaCPU_M[3] = k2_Nucleus; sceNucleusParaCPU_M[4] = nucleusLinkEffectiveRange; ////////////////////// //// Block Nucleus Division ///////// ////////////////////// double sceNucleusParaDivCPU_M[5]; double U0_Nucleus_Div = globalConfigVars.getConfigValue("NucleusCell_U0_Div").toDouble(); double V0_Nucleus_Div = globalConfigVars.getConfigValue("NucleusCell_V0_Div").toDouble(); double k1_Nucleus_Div = globalConfigVars.getConfigValue("NucleusCell_k1_Div").toDouble(); double k2_Nucleus_Div = globalConfigVars.getConfigValue("NucleusCell_k2_Div").toDouble(); double nucleusDivEffectiveRange = globalConfigVars.getConfigValue( "NucleusDivEffectRange").toDouble(); sceNucleusParaDivCPU_M[0] = U0_Nucleus_Div; sceNucleusParaDivCPU_M[1] = V0_Nucleus_Div; sceNucleusParaDivCPU_M[2] = k1_Nucleus_Div; sceNucleusParaDivCPU_M[3] = k2_Nucleus_Div; sceNucleusParaDivCPU_M[4] = nucleusDivEffectiveRange; hipMemcpyToSymbol(sceN_M, sceNucleusParaCPU_M, 5 * sizeof(double)); //Ali hipMemcpyToSymbol(sceNDiv_M, sceNucleusParaDivCPU_M, 5 * sizeof(double)); //Ali } void SceCells::updateMembrGrowthProgress_M() { // figure out membr growth speed calMembrGrowSpeed_M(); //Ali: to my understanding it doesn't do anything right now. it will be override by adjustMembrGrowSpeed_M // figure out which cells will add new point and which cell needs to delete node. // adjustMembrGrowSpeed_M(); // for now just a constant speed to give some relaxation before adding another node. // returning a bool and progress for each cell. if bool is true (a node sould be added) progress will be reset to give relaxation time after adding the node. Otherwise growth prgoress will be incremented // add membr nodes // In each time step either adding mechanism is active or deleting mechanism. 
It is an unneccessary complication to manage memory for both operations at one time step. // uint curActCellCt = allocPara_m.currentActiveCellCount; // thrust::transform(cellInfoVecs.membrGrowSpeed.begin(), // cellInfoVecs.membrGrowSpeed.begin() + curActCellCt, // cellInfoVecs.membrGrowProgress.begin(), // cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt)); } void SceCells::handleMembrGrowth_M(int maxApicalBasalNodeNum, double maxLengthToAddMemNodes) { if (1>0){//addNode) { decideIfAddMembrNode_M(maxApicalBasalNodeNum, maxLengthToAddMemNodes); addMembrNodes_M(); // addNode=false ; // cout << " I am in add membrane node " << endl ; } else { // decideIfDelMembrNode_M(); //Ali // delMembrNodes_M(); // addNode=true ; // cout << " I am in del membrane node " << endl ; } //membrDebug(); } // void SceCells::calMembrGrowSpeed_M() { // membrPara.membrGrowCoeff = growthAuxData.prolifDecay // * membrPara.membrGrowCoeff_Ori; // membrPara.membrGrowLimit = growthAuxData.prolifDecay // * membrPara.membrGrowLimit_Ori; // // reduce_by_key, find value of max tension and their index // thrust::counting_iterator<uint> iBegin(0); // uint maxNPerCell = allocPara_m.maxAllNodePerCell; // thrust::reduce_by_key( // make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), // make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().membrTenMagRi.begin(), // make_transform_iterator(iBegin, // ModuloFunctor(maxNPerCell)), // nodes->getInfoVecs().membrLinkRiMidX.begin(), // nodes->getInfoVecs().membrLinkRiMidY.begin(), // nodes->getInfoVecs().membrDistToRi.begin())), // cellInfoVecs.cellRanksTmpStorage.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(), // cellInfoVecs.maxTenIndxVec.begin(), // cellInfoVecs.maxTenRiMidXVec.begin(), // cellInfoVecs.maxTenRiMidYVec.begin(), // cellInfoVecs.maxDistToRiVec.begin())), // thrust::equal_to<uint>(), MaxWInfo()); // // for (int i=0 ; i<cellInfoVecs.maxDistToRiVec.size() ; i++) { // // cout << "the max distance in cell" << i << " is "<<cellInfoVecs.maxDistToRiVec[i] << endl ; // // } // //Ali for min Distance // thrust::counting_iterator<uint> iBegin_min(0); // thrust::reduce_by_key( // make_transform_iterator(iBegin_min, DivideFunctor(maxNPerCell)), // begin of the key // make_transform_iterator(iBegin_min, DivideFunctor(maxNPerCell)) // end of the key // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().membrDistToRi.begin(), // make_transform_iterator(iBegin_min, // values to reduce by key // ModuloFunctor(maxNPerCell)) // )), // cellInfoVecs.cellRanksTmpStorage1.begin(), // to Store reduced version of key // thrust::make_zip_iterator( // thrust::make_tuple( // cellInfoVecs.minDistToRiVec.begin(), // cellInfoVecs.minTenIndxVec.begin() // to sotred the reduce verision of values // )), // thrust::equal_to<uint>(), MinWInfo()); // how to sort the keys & how to reduce the parameters assigned to based on each key // // equal_to mean how we set the beans to reduce. For example here we are saying if they are equal in Int we compare them and would peroform the reduction. 
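	// A minimal illustration of the reduce_by_key pattern used in calMembrGrowSpeed_M below,
	// with hypothetical values: the key of node i is its cell rank, i / maxNPerCell, produced
	// by DivideFunctor, so every run of maxNPerCell consecutive nodes collapses to one
	// per-cell result. The real code reduces tuples with the MaxWInfo / MinWInfo functors;
	// plain thrust::minimum is used here only to keep the sketch short.
	//   keys   : 0 0 0 1 1 1        values : 3.0 1.0 2.0 5.0 4.0 6.0
	//   thrust::reduce_by_key(keys, keys + 6, values, outKeys, outVals,
	//                         thrust::equal_to<uint>(), thrust::minimum<double>());
	//   outKeys: 0 1                outVals: 1.0 4.0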
// // for (int i=0 ; i<cellInfoVecs.minDistToRiVec.size() ; i++) { // // cout << "the min distance in cell" << i << " is "<<cellInfoVecs.minDistToRiVec[i] << endl ; // // cout << "the min tension index vec" << i << " is "<<cellInfoVecs.minTenIndxVec[i] << endl ; // // } // thrust::reduce_by_key( // make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), // make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) // + totalNodeCountForActiveCells, // nodes->getInfoVecs().membrTensionMag.begin(), // cellInfoVecs.cellRanksTmpStorage.begin(), // cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(), // thrust::plus<double>()); // thrust::transform(cellInfoVecs.aveTension.begin(), // cellInfoVecs.aveTension.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.aveTension.begin(), thrust::divides<double>()); // // linear relationship with highest tension; capped by a given value // thrust::transform(cellInfoVecs.aveTension.begin(), // cellInfoVecs.aveTension.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.membrGrowSpeed.begin(), // MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit)); // } void SceCells::calMembrGrowSpeed_M() { membrPara.membrGrowCoeff = growthAuxData.prolifDecay * membrPara.membrGrowCoeff_Ori; membrPara.membrGrowLimit = growthAuxData.prolifDecay * membrPara.membrGrowLimit_Ori; // reduce_by_key, find value of max tension and their index thrust::counting_iterator<uint> iBegin(0); uint maxNPerCell = allocPara_m.maxAllNodePerCell; thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrTenMagRi.begin(), make_transform_iterator(iBegin, ModuloFunctor(maxNPerCell)), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrDistToRi.begin(), nodes->getInfoVecs().memNodeType1.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(), cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.maxDistToRiVec.begin(), cellInfoVecs.maxTenIndxTypeVec.begin())), thrust::equal_to<uint>(), MaxWInfo()); // for (int i=0 ; i<cellInfoVecs.maxDistToRiVec.size() ; i++) { // cout << "the max distance in cell" << i << " is "<<cellInfoVecs.maxDistToRiVec[i] << endl ; // cout << "At index "<<cellInfoVecs.maxTenIndxVec[i]<<std::endl; // } //Ali for min Distance thrust::counting_iterator<uint> iBegin_min(0); thrust::reduce_by_key( make_transform_iterator(iBegin_min, DivideFunctor(maxNPerCell)), // begin of the key make_transform_iterator(iBegin_min, DivideFunctor(maxNPerCell)) // end of the key + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrDistToRi.begin(), make_transform_iterator(iBegin_min, // values to reduce by key ModuloFunctor(maxNPerCell)) )), cellInfoVecs.cellRanksTmpStorage1.begin(), // to Store reduced version of key thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.minDistToRiVec.begin(), cellInfoVecs.minTenIndxVec.begin() // to sotred the reduce verision of values )), thrust::equal_to<uint>(), MinWInfo()); // how to sort the keys & how to reduce the parameters assigned to based on each key // equal_to mean how we set the beans to reduce. 
For example here we are saying if they are equal in Int we compare them and would peroform the reduction. // for (int i=0 ; i<cellInfoVecs.minDistToRiVec.size() ; i++) { // cout << "the min distance in cell" << i << " is "<<cellInfoVecs.minDistToRiVec[i] << endl ; // cout << "the min tension index vec" << i << " is "<<cellInfoVecs.minTenIndxVec[i] << endl ; // } thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, nodes->getInfoVecs().membrTensionMag.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.aveTension.begin(), thrust::divides<double>()); // linear relationship with highest tension; capped by a given value thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit)); } void SceCells::adjustMembrGrowSpeed_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), AdjustMembrGrow(membrPara.growthConst_N, membrPara.initMembrCt_N, membrPara.initIntnlCt_N)); } void SceCells::decideIfAddMembrNode_M(int maxApicalBasalNodeNum, double maxLengthToAddMemNodes) { // decide if add membrane node given current active node count and // membr growth progresss uint curActCellCt = allocPara_m.currentActiveCellCount; uint maxMembrNode = allocPara_m.maxMembrNodePerCell; bool isInitPhase= nodes->isInitPhase ; /* thrust::transform(cellInfoVecs.membrGrowSpeed.begin(), cellInfoVecs.membrGrowSpeed.begin() + curActCellCt, cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt)); */ /* thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemGrowFunc(maxMembrNode)); */ thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.maxDistToRiVec.begin(), cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.numApicalVec.begin(), cellInfoVecs.numBasalVec.begin(), cellInfoVecs.maxTenIndxTypeVec.begin(), cellInfoVecs.cellRankVec.begin() )), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.maxDistToRiVec.begin(), cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.numApicalVec.begin(), cellInfoVecs.numBasalVec.begin(), cellInfoVecs.maxTenIndxTypeVec.begin(), cellInfoVecs.cellRankVec.begin() )) + curActCellCt, thrust::make_zip_iterator( 
thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.numApicalVec.begin(), cellInfoVecs.numBasalVec.begin())), MemGrowFunc(maxMembrNode,isInitPhase, maxApicalBasalNodeNum, maxLengthToAddMemNodes)); } //Ali void SceCells::decideIfDelMembrNode_M() { uint curActCellCt = allocPara_m.currentActiveCellCount; uint maxMembrNode = allocPara_m.maxMembrNodePerCell; bool isInitPhase= nodes->isInitPhase ; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.minDistToRiVec.begin(), cellInfoVecs.growthProgress.begin() )), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.minDistToRiVec.begin(), cellInfoVecs.growthProgress.begin() )) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrRemovingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemDelFunc(maxMembrNode, isInitPhase)); } /** * Add new membrane elements to cells. * This operation is relatively expensive because of memory rearrangement. */ // void SceCells::addMembrNodes_M() { // thrust::counting_iterator<uint> iBegin(0); // uint curAcCCount = allocPara_m.currentActiveCellCount; // uint maxNodePerCell = allocPara_m.maxAllNodePerCell; // bool* nodeIsActiveAddress = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeIsActive[0])); // double* nodeXPosAddress = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocX[0])); // double* nodeYPosAddress = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocY[0])); // int* adhIndxAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeAdhereIndex[0])); // MembraneType1* memNodeType1 = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().memNodeType1[0])); // thrust::transform_if( // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, // cellInfoVecs.maxTenIndxVec.begin(), // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.maxTenRiMidXVec.begin(), // cellInfoVecs.maxTenRiMidYVec.begin(), // cellInfoVecs.ringApicalId.begin(), // cellInfoVecs.ringBasalId.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, // cellInfoVecs.maxTenIndxVec.begin(), // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.maxTenRiMidXVec.begin(), // cellInfoVecs.maxTenRiMidYVec.begin(), // cellInfoVecs.ringApicalId.begin(), // cellInfoVecs.ringBasalId.begin())) // + curAcCCount, cellInfoVecs.isMembrAddingNode.begin(), // thrust::make_zip_iterator( // thrust::make_tuple( // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.ringApicalId.begin(), // cellInfoVecs.ringBasalId.begin())), // AddMemNode(maxNodePerCell, // nodeIsActiveAddress, // nodeXPosAddress, // nodeYPosAddress, // adhIndxAddr, // memNodeType1), // thrust::identity<bool>()); // } void SceCells::addMembrNodes_M() { thrust::counting_iterator<uint> iBegin(0); uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; bool* nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double* nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); int* adhIndxAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[0])); MembraneType1* memNodeType1 = thrust::raw_pointer_cast( 
&(nodes->getInfoVecs().memNodeType1[0])); thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())) + curAcCCount, cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), AddMemNode(maxNodePerCell, nodeIsActiveAddress, nodeXPosAddress, nodeYPosAddress, adhIndxAddr, memNodeType1), thrust::identity<bool>()); for (int z = 0; z < allocPara_m.currentActiveCellCount; z++){ if (cellInfoVecs.isMembrAddingNode[z] == true){ nodes->isMemNodeTypeAssigned_postAddNode=false; cellInfoVecs.isPostAddMembrNodes = true; break; } } } //Ali void SceCells::delMembrNodes_M() { thrust::counting_iterator<uint> iBegin(0); uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.minTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())) + curAcCCount, cellInfoVecs.isMembrRemovingNode.begin(), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())), DelMemNode(maxNodePerCell, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.adhIndxAddr,growthAuxData.memNodeType1Address), thrust::identity<bool>()); } void SceCells::membrDebug() { uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxActiveNodeC = curAcCCount * allocPara_m.maxAllNodePerCell; uint maxNodePC = allocPara_m.maxAllNodePerCell; //uint tmp = 0; //for (uint i = 0; i < curAcCCount; i++) { // tmp += cellInfoVecs.isMembrAddingNode[i]; //} //if (tmp != 0) { // tmpDebug = true; //} //if (!tmpDebug) { // return; //} for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTensionMag[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTenMagRi[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidY[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendLeftX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << 
nodes->getInfoVecs().membrBendLeftY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < curAcCCount; i++) { std::cout << "(" << cellInfoVecs.maxTenIndxVec[i] << "," << cellInfoVecs.activeMembrNodeCounts[i] << "," << cellInfoVecs.maxTenRiMidXVec[i] << "," << cellInfoVecs.maxTenRiMidYVec[i] << ")" << std::endl; } int jj; std::cin >> jj; } void SceCells::assembleVecForTwoCells(uint i) { uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp1VecMem.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1VecMem[j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1VecMem[j].y; divAuxData.tmpNodeType1[index] = divAuxData.tmp1VecMemNodeType[j] ; //Ali // std::cout<<"divAuxData.tmpPos1_M["<<index<<"] = "<<divAuxData.tmpXPos1_M[index]<<" "<<divAuxData.tmpYPos1_M[index]<<", tmpNodeType1 = "<<divAuxData.tmpNodeType1[index]<<std::endl; divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } } for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp2VecMem.size()) { divAuxData.tmpXPos2_M[index] = divAuxData.tmp2VecMem[j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2VecMem[j].y; divAuxData.tmpNodeType2[index] = divAuxData.tmp2VecMemNodeType[j] ; //Ali // std::cout<<"divAuxData.tmpPos2_M["<<index<<"] = "<<divAuxData.tmpXPos2_M[index]<<" "<<divAuxData.tmpYPos2_M[index]<<", tmpNodeType2 = "<<divAuxData.tmpNodeType2[index]<<std::endl; divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1MemActiveCounts.push_back(divAuxData.tmp1VecMem.size()); std::cout<<"divAuxData.tmp1MemActiveCounts size = "<<divAuxData.tmp1MemActiveCounts[0]<<std::endl; divAuxData.tmp2MemActiveCounts.push_back(divAuxData.tmp2VecMem.size()); std::cout<<"divAuxData.tmp2MemActiveCounts size = "<<divAuxData.tmp2MemActiveCounts[0]<<std::endl; for (uint j = membThreshold; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; uint shift_j = j - membThreshold; if (shift_j < divAuxData.tmp1IntnlVec.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].y; divAuxData.tmpNodeType1[index] = notAssigned1 ; //Ali divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } if (shift_j < divAuxData.tmp2IntnlVec.size()) { divAuxData.tmpXPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].y; divAuxData.tmpNodeType2[index] = notAssigned1 ; //Ali divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1InternalActiveCounts.push_back( divAuxData.tmp1IntnlVec.size()); divAuxData.tmp2InternalActiveCounts.push_back( divAuxData.tmp2IntnlVec.size()); } // we have two new center of internal node positions. 
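// (A minimal sketch of where those two centers come from: obtainTwoNewIntCenters, further
//  below, shifts the old internal-node center by half of the length along the division axis,
//  scaled by centerShiftRatio, in opposite directions for the two daughter cells:
//      lenChange     = len_MajorAxis / 2.0 * centerShiftRatio;
//      intCenterNew1 = oldIntCenter + lenChange * divDirUnit;
//      intCenterNew2 = oldIntCenter - lenChange * divDirUnit;
//  with divDirUnit the normalised division direction.)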
// we already shrinked the internal nodes around their old internal nodes center // here we shift the internal nodes of each cell around the new internal node position void SceCells::shiftIntnlNodesByCellCenter(CVector intCell1Center, CVector intCell2Center) { CVector tmpCell1Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { tmpCell1Center = tmpCell1Center + divAuxData.tmp1IntnlVec[j]; } tmpCell1Center = tmpCell1Center / divAuxData.tmp1IntnlVec.size(); CVector shiftVec1 = intCell1Center - tmpCell1Center; // it should be new nucleus center for cell1 for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { divAuxData.tmp1IntnlVec[j] = divAuxData.tmp1IntnlVec[j] + shiftVec1; } CVector tmpCell2Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { tmpCell2Center = tmpCell2Center + divAuxData.tmp2IntnlVec[j]; } tmpCell2Center = tmpCell2Center / divAuxData.tmp2IntnlVec.size(); CVector shiftVec2 = intCell2Center - tmpCell2Center; // it should be new nucleus center for cell 2 for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { divAuxData.tmp2IntnlVec[j] = divAuxData.tmp2IntnlVec[j] + shiftVec2; } } void SceCells::processMemVec(uint i, std::vector<VecValT>& tmp1, std::vector<VecValT>& tmp2, CVector oldNucleusCenter) { divAuxData.tmp1VecMem.clear(); divAuxData.tmp2VecMem.clear(); divAuxData.tmp1VecMemNodeType.clear(); //Ali divAuxData.tmp2VecMemNodeType.clear(); //Ali uint membThreshold = allocPara_m.maxMembrNodePerCell; std::sort(tmp1.begin(), tmp1.end()); std::sort(tmp2.begin(), tmp2.end()); // Here we perform a cross-product computation to check the node order orientation. If it is clockwise, reverse it so it is counter-clockwise. double vec1x = (tmp1[0].vec.x - oldNucleusCenter.x); double vec2x = (tmp1[1].vec.x - oldNucleusCenter.x); double vec1y = (tmp1[0].vec.y - oldNucleusCenter.y); double vec2y = (tmp1[1].vec.y - oldNucleusCenter.y); double orientation = vec1x*vec2y - vec2x*vec1y; if (orientation < 0){ std::cout<<"tmp1 orientation incorrect. Rearranging tmp1."<<std::endl; std::vector<VecValT> tmp_tmp1 = tmp1; tmp1.clear(); for (int i = 0; i < tmp_tmp1.size(); i++){ tmp1.push_back(tmp_tmp1[(tmp_tmp1.size()-1) - i]); } } vec1x = (tmp2[0].vec.x - oldNucleusCenter.x); vec2x = (tmp2[1].vec.x - oldNucleusCenter.x); vec1y = (tmp2[0].vec.y - oldNucleusCenter.y); vec2y = (tmp2[1].vec.y - oldNucleusCenter.y); orientation = vec1x*vec2y - vec2x*vec1y; if (orientation < 0){ std::cout<<"tmp2 orientation incorrect. 
Rearranging tmp2."<<std::endl; std::vector<VecValT> tmp_tmp2 = tmp2; tmp2.clear(); for (int i = 0; i < tmp_tmp2.size(); i++){ tmp2.push_back(tmp_tmp2[(tmp_tmp2.size()-1) - i]); } } // std::vector<VecValT> sorted_tmp1 = tmp1; // std::vector<VecValT> sorted_tmp2 = tmp2; // std::sort(sorted_tmp1.begin(), sorted_tmp1.end()); // std::sort(sorted_tmp2.begin(), sorted_tmp2.end()); // for (int j = 0; j < tmp1.size(); j++){ // std::cout<<"tmp1["<<j<<"].val = "<<tmp1[j].val<<", vec = "<<tmp1[j].vec.x<<" "<<tmp1[j].vec.y<<std::endl; // } // for (int j = 0; j < tmp2.size(); j++){ // std::cout<<"tmp2["<<j<<"].val = "<<tmp2[j].val<<", vec = "<<tmp2[j].vec.x<<" "<<tmp2[j].vec.y<<std::endl; // } //assert(tmp1.size() < allocPara_m.maxMembrNodePerCell); //assert(tmp2.size() < allocPara_m.maxMembrNodePerCell); uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size(); uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size(); std::vector<CVector> ptsBetween1, ptsBetween2; uint Num_of_NodeType0_MotherCell = 0, Num_of_NodeType0_DaughterCell = 0, Num_of_NodeType1_DaughterCell = 0, Num_of_NodeType1_MotherCell = 0; if (divAuxData.isMotherCellBehind[i] == true){ // Recall that tmp1 will always be the mother cell data //Kevin for (int j = 0; j < tmp1.size(); j++){ if (tmp1[j].type == lateralB){ Num_of_NodeType0_MotherCell+=1; } } if (Num_of_NodeType0_MotherCell > maxDivMembrNodeCount1){ std::cout<<"Too many new nodes are needed to be introduced for mother cell! Change the max number of mem nodes allowed!"<<std::endl; } for (int j = 0; j < tmp2.size(); j++){ if (tmp2[j].type == lateralA){ Num_of_NodeType1_DaughterCell+=1; } } if (Num_of_NodeType1_DaughterCell > maxDivMembrNodeCount2){ std::cout<<"Too many new nodes are needed to be introducedj for daughter cell! Change the max number of mem nodes allowed!"<<std::endl; } // Num_of_NodeType0_MotherCell = ; // Num_of_NodeType1_DaughterCell = ; } else{ for (int j = 0; j < tmp1.size(); j++){ if (tmp1[j].type == lateralA){ Num_of_NodeType1_MotherCell+=1; } } if (Num_of_NodeType1_MotherCell > maxDivMembrNodeCount1){ std::cout<<"Too many new nodes are needed to be introduced for mother cell! Change the max number of mem nodes allowed!"<<std::endl; } for (int j = 0; j < tmp2.size(); j++){ if (tmp2[j].type == lateralB){ Num_of_NodeType0_DaughterCell+=1; } } if (Num_of_NodeType0_DaughterCell > maxDivMembrNodeCount2){ std::cout<<"Too many new nodes are needed to be introduced for daughter cell! Change the max number of mem nodes allowed!"<<std::endl; } } std::cout<<"Num_of_NodeType0_MotherCell = "<<Num_of_NodeType0_MotherCell<<std::endl; std::cout<<"Num_of_NodeType1_MotherCell = "<<Num_of_NodeType1_MotherCell<<std::endl; std::cout<<"Num_of_NodeType0_DaughterCell = "<<Num_of_NodeType0_DaughterCell<<std::endl; std::cout<<"Num_of_NodeType1_DaughterCell = "<<Num_of_NodeType1_DaughterCell<<std::endl; // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. 
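// A minimal sketch of what the interpolation below is expected to do (assumption:
// obtainPtsBetween returns points placed between 'start' and 'end'; the actual helper is
// defined elsewhere and may space or clamp them differently):
//     std::vector<CVector> pts;
//     for (uint k = 1; k <= numPtsToAdd; k++) {
//         double frac = double(k) / double(numPtsToAdd + 1);
//         pts.push_back(start + (end - start) * frac);   // evenly spaced between the two ends
//     }
// Here 'start' is the last and 'end' the first membrane node of the sorted daughter-cell list,
// numPtsToAdd is the count of existing lateral nodes on the opposite side (so the new lateral
// boundary has comparable resolution), and maxDivMembrNodeCount (the remaining membrane-node
// capacity) is passed along, presumably as an upper bound.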
if (tmp1.size() >= 1) { if (divAuxData.isMotherCellBehind[i] == true){ ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, Num_of_NodeType0_MotherCell, maxDivMembrNodeCount1); } else{ ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, Num_of_NodeType1_MotherCell, maxDivMembrNodeCount1); } // std::cout<<"tmp1[tmp1.size() - 1] = "<<tmp1[tmp1.size() - 1].vec.x<<" "<<tmp1[tmp1.size()-1].vec.y<<std::endl; // std::cout<<"tmp1[0] = "<<tmp1[0].vec.x<<" "<<tmp1[0].vec.y<<std::endl; // ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, // memNewSpacing, maxDivMembrNodeCount1); // std::cout<<"sorted_tmp1[tmp1.size() - 1].val = "<<sorted_tmp1[sorted_tmp1.size()-1].val<<", .vec = "<< sorted_tmp1[sorted_tmp1.size() - 1].vec.x<<" "<<sorted_tmp1[sorted_tmp1.size()-1].vec.y<<std::endl; // std::cout<<"sorted_tmp1[0].val = "<<sorted_tmp1[0].val<<", .vec = "<<sorted_tmp1[0].vec.x<<" "<<sorted_tmp1[0].vec.y<<std::endl; // ptsBetween1 = obtainPtsBetween(sorted_tmp1[sorted_tmp1.size() - 1].vec, sorted_tmp1[0].vec, // memNewSpacing, maxDivMembrNodeCount1); } // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. if (tmp2.size() >= 1) { if (divAuxData.isMotherCellBehind[i] == true){ ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, Num_of_NodeType1_DaughterCell, maxDivMembrNodeCount2); } else{ ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, Num_of_NodeType0_DaughterCell, maxDivMembrNodeCount2); } // std::cout<<"tmp2[tmp1.size() - 1] = "<<tmp2[tmp2.size() - 1].vec.x<<" "<<tmp2[tmp2.size()-1].vec.y<<std::endl; // std::cout<<"tmp2[0] = "<<tmp2[0].vec.x<<" "<<tmp2[0].vec.y<<std::endl; // ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, // memNewSpacing, maxDivMembrNodeCount2); // std::cout<<"sorted_tmp2[sorted_tmp2.size() - 1].val = "<<sorted_tmp2[sorted_tmp2.size()-1].val<<", .vec = "<<sorted_tmp2[sorted_tmp2.size() - 1].vec.x<<" "<<sorted_tmp2[sorted_tmp2.size()-1].vec.y<<std::endl; // std::cout<<"sorted_tmp2[0].val = "<<sorted_tmp2[0].val<<", vec = "<<sorted_tmp2[0].vec.x<<" "<<sorted_tmp2[0].vec.y<<std::endl; // ptsBetween2 = obtainPtsBetween(sorted_tmp2[sorted_tmp2.size() - 1].vec, sorted_tmp2[0].vec, // memNewSpacing, maxDivMembrNodeCount2); } for (uint j = 0; j < tmp1.size(); j++) { divAuxData.tmp1VecMem.push_back(tmp1[j].vec); divAuxData.tmp1VecMemNodeType.push_back(tmp1[j].type); } for (uint j = 0; j < tmp2.size(); j++) { divAuxData.tmp2VecMem.push_back(tmp2[j].vec); divAuxData.tmp2VecMemNodeType.push_back(tmp2[j].type); } std::cout<<"ptsBetween1 size = "<<ptsBetween1.size()<<std::endl; if (divAuxData.isMotherCellBehind[i] == false){ for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); // std::cout<<"ptsBtween1 "<<ptsBetween1[j].x<<" "<<ptsBetween1[j].y<<std::endl; divAuxData.tmp1VecMemNodeType.push_back(lateralB); } // std::cout<<"size of tmp1VecMemNodeType = "<<divAuxData.tmp1VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp1VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp1VecMem["<<j<<"] = "<<divAuxData.tmp1VecMem[j].x<<" "<<divAuxData.tmp1VecMem[j].y<<" "<<divAuxData.tmp1VecMem[j].z<<", Type = "<<divAuxData.tmp1VecMemNodeType[j]<<std::endl; // } } else if (divAuxData.isMotherCellBehind[i] == true){ for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); // std::cout<<"ptsBtween1 
"<<ptsBetween1[j].x<<" "<<ptsBetween1[j].y<<std::endl; divAuxData.tmp1VecMemNodeType.push_back(lateralA); } // std::cout<<"size of tmp1VecMemNodeType = "<<divAuxData.tmp1VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp1VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp1VecMem["<<j<<"] = "<<divAuxData.tmp1VecMem[j].x<<" "<<divAuxData.tmp1VecMem[j].y<<" "<<divAuxData.tmp1VecMem[j].z<<", Type = "<<divAuxData.tmp1VecMemNodeType[j]<<std::endl; // } } std::cout<<"ptsBetween2 size = "<<ptsBetween2.size()<<std::endl; if (divAuxData.isMotherCellBehind[i] == false){ for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); // std::cout<<"ptsBtween2 "<<ptsBetween2[j].x<<" "<<ptsBetween2[j].y<<std::endl; divAuxData.tmp2VecMemNodeType.push_back(lateralA); } // std::cout<<"size of tmp2VecMemNodeType = "<<divAuxData.tmp2VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp2VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp2VecMem["<<j<<"] = "<<divAuxData.tmp2VecMem[j].x<<" "<<divAuxData.tmp2VecMem[j].y<<" "<<divAuxData.tmp2VecMem[j].z<<", Type = "<<divAuxData.tmp2VecMemNodeType[j]<<std::endl; // } } else if (divAuxData.isMotherCellBehind[i] == true){ for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); // std::cout<<"ptsBtween2 "<<ptsBetween2[j].x<<" "<<ptsBetween2[j].y<<std::endl; divAuxData.tmp2VecMemNodeType.push_back(lateralB); } // std::cout<<"size of tmp2VecMemNodeType = "<<divAuxData.tmp2VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp2VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp2VecMem["<<j<<"] = "<<divAuxData.tmp2VecMem[j].x<<" "<<divAuxData.tmp2VecMem[j].y<<" "<<divAuxData.tmp2VecMem[j].z<<", Type = "<<divAuxData.tmp2VecMemNodeType[j]<<std::endl; // } } //Here we will try to adjust the positions of newly added nodes (from ptsBetween1 and ptsBetween2) according to the 'center line' of the cell. assert(divAuxData.tmp1VecMem.size() <= membThreshold); assert(divAuxData.tmp2VecMem.size() <= membThreshold); } void SceCells::processMemVec_Ver2(uint i, std::vector<VecValT>& tmp1, std::vector<VecValT>& tmp2, CVector oldNucleusCenter, std::vector<CVector>& cellCenterLine_Basal2Apical, std::vector<CVector>& cellCenterLine_Apical2Basal, std::vector<CVector>& cellCenterLine_Basal2Apical_leftShift, std::vector<CVector>& cellCenterLine_Basal2Apical_rightShift, std::vector<CVector>& cellCenterLine_Apical2Basal_leftShift, std::vector<CVector>& cellCenterLine_Apical2Basal_rightShift, std::vector<double>& cellCenterLine_MirrorLength_Basal2Apical, std::vector<double>& cellCenterLine_MirrorLength_Apical2Basal) { divAuxData.tmp1VecMem.clear(); divAuxData.tmp2VecMem.clear(); divAuxData.tmp1VecMemNodeType.clear(); //Ali divAuxData.tmp2VecMemNodeType.clear(); //Ali uint membThreshold = allocPara_m.maxMembrNodePerCell; // std::sort(tmp1.begin(), tmp1.end()); // std::sort(tmp2.begin(), tmp2.end()); // Here we perform a cross-product computation to check the node order orientation. If it is clockwise, reverse it so it is counter-clockwise. double vec1x = (tmp1[0].vec.x - oldNucleusCenter.x); double vec2x = (tmp1[1].vec.x - oldNucleusCenter.x); double vec1y = (tmp1[0].vec.y - oldNucleusCenter.y); double vec2y = (tmp1[1].vec.y - oldNucleusCenter.y); double orientation = vec1x*vec2y - vec2x*vec1y; std::cout<<"tmp1 orientation = "<<orientation<<std::endl; if (orientation < 0){ std::cout<<"tmp1 orientation incorrect. 
Rearranging tmp1."<<std::endl; std::vector<VecValT> tmp_tmp1 = tmp1; for (int i = 0; i < tmp1.size(); i++){ // std::cout<<"tmp1 "<<tmp1[i].vec.x<<" "<<tmp1[i].vec.y<<" "<<tmp1[i].val<<" "<<tmp1[i].type<<std::endl; } tmp1.clear(); for (int i = 0; i < tmp_tmp1.size(); i++){ tmp1.push_back(tmp_tmp1[(tmp_tmp1.size()-1) - i]); } // for (int i = 0; i < tmp_tmp1.size(); i++){ // std::cout<<"tmp_tmp1 "<<tmp_tmp1[i].vec.x<<" "<<tmp_tmp1[i].vec.y<<" "<<tmp_tmp1[i].val<<" "<<tmp_tmp1[i].type<<std::endl; // } } vec1x = (tmp2[0].vec.x - oldNucleusCenter.x); vec2x = (tmp2[1].vec.x - oldNucleusCenter.x); vec1y = (tmp2[0].vec.y - oldNucleusCenter.y); vec2y = (tmp2[1].vec.y - oldNucleusCenter.y); orientation = vec1x*vec2y - vec2x*vec1y; std::cout<<"tmp2 orientation = "<<orientation<<std::endl; if (orientation < 0){ std::cout<<"tmp2 orientation incorrect. Rearranging tmp2."<<std::endl; std::vector<VecValT> tmp_tmp2 = tmp2; for (int i = 0; i < tmp2.size(); i++){ // std::cout<<"tmp2 "<<tmp2[i].vec.x<<" "<<tmp2[i].vec.y<<" "<<tmp2[i].val<<" "<<tmp2[i].type<<std::endl; } tmp2.clear(); for (int i = 0; i < tmp_tmp2.size(); i++){ tmp2.push_back(tmp_tmp2[(tmp_tmp2.size()-1) - i]); } // for (int i = 0; i < tmp_tmp2.size(); i++){ // std::cout<<"tmp_tmp2 "<<tmp_tmp2[i].vec.x<<" "<<tmp_tmp2[i].vec.y<<" "<<tmp_tmp2[i].val<<" "<<tmp_tmp2[i].type<<std::endl; // } } // std::vector<VecValT> sorted_tmp1 = tmp1; // std::vector<VecValT> sorted_tmp2 = tmp2; // std::sort(sorted_tmp1.begin(), sorted_tmp1.end()); // std::sort(sorted_tmp2.begin(), sorted_tmp2.end()); // for (int j = 0; j < sorted_tmp1.size(); j++){ // std::cout<<"sorted_tmp1["<<j<<"].val = "<<sorted_tmp1[j].val<<", vec = "<<sorted_tmp1[j].vec.x<<" "<<sorted_tmp1[j].vec.y<<std::endl; // } // for (int j = 0; j < sorted_tmp2.size(); j++){ // std::cout<<"sorted_tmp2["<<j<<"].val = "<<sorted_tmp2[j].val<<", vec = "<<sorted_tmp2[j].vec.x<<" "<<sorted_tmp2[j].vec.y<<std::endl; // } // for (int j = 0; j < tmp1.size(); j++){ // std::cout<<"tmp1["<<j<<"].val = "<<tmp1[j].val<<", vec = "<<tmp1[j].vec.x<<" "<<tmp1[j].vec.y<<" , type = "<<tmp2[j].type<<std::endl; // } // for (int j = 0; j < tmp2.size(); j++){ // std::cout<<"tmp2["<<j<<"].val = "<<tmp2[j].val<<", vec = "<<tmp2[j].vec.x<<" "<<tmp2[j].vec.y<<" , type = "<<tmp2[j].type<<std::endl; // } // //assert(tmp1.size() < allocPara_m.maxMembrNodePerCell); // //assert(tmp2.size() < allocPara_m.maxMembrNodePerCell); uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size(); uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size(); std::vector<CVector> ptsBetween1, ptsBetween2; uint Num_of_NodeType0_MotherCell = 0, Num_of_NodeType0_DaughterCell = 0, Num_of_NodeType1_DaughterCell = 0, Num_of_NodeType1_MotherCell = 0; if (divAuxData.isMotherCellBehind[i] == true){ // Recall that tmp1 will always be the mother cell data //Kevin for (int j = 0; j < tmp1.size(); j++){ if (tmp1[j].type == lateralB){ Num_of_NodeType0_MotherCell+=1; } } if (Num_of_NodeType0_MotherCell > maxDivMembrNodeCount1){ std::cout<<"Too many new nodes are needed to be introduced for mother cell! Change the max number of mem nodes allowed!"<<std::endl; } for (int j = 0; j < tmp2.size(); j++){ if (tmp2[j].type == lateralA){ Num_of_NodeType1_DaughterCell+=1; } } if (Num_of_NodeType1_DaughterCell > maxDivMembrNodeCount2){ std::cout<<"Too many new nodes are needed to be introducedj for daughter cell! 
Change the max number of mem nodes allowed!"<<std::endl; } // Num_of_NodeType0_MotherCell = ; // Num_of_NodeType1_DaughterCell = ; } else{ for (int j = 0; j < tmp1.size(); j++){ if (tmp1[j].type == lateralA){ Num_of_NodeType1_MotherCell+=1; } } if (Num_of_NodeType1_MotherCell > maxDivMembrNodeCount1){ std::cout<<"Too many new nodes are needed to be introduced for mother cell! Change the max number of mem nodes allowed!"<<std::endl; } for (int j = 0; j < tmp2.size(); j++){ if (tmp2[j].type == lateralB){ Num_of_NodeType0_DaughterCell+=1; } } if (Num_of_NodeType0_DaughterCell > maxDivMembrNodeCount2){ std::cout<<"Too many new nodes are needed to be introduced for daughter cell! Change the max number of mem nodes allowed!"<<std::endl; } } std::cout<<"Num_of_NodeType0_MotherCell = "<<Num_of_NodeType0_MotherCell<<std::endl; std::cout<<"Num_of_NodeType1_MotherCell = "<<Num_of_NodeType1_MotherCell<<std::endl; std::cout<<"Num_of_NodeType0_DaughterCell = "<<Num_of_NodeType0_DaughterCell<<std::endl; std::cout<<"Num_of_NodeType1_DaughterCell = "<<Num_of_NodeType1_DaughterCell<<std::endl; // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. if (tmp1.size() >= 1) { if (divAuxData.isMotherCellBehind[i] == true){ // ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, // Num_of_NodeType0_MotherCell, maxDivMembrNodeCount1); ptsBetween1 = obtainPtsBetween_cellCenterLine(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, cellCenterLine_Basal2Apical, cellCenterLine_Basal2Apical_leftShift, cellCenterLine_MirrorLength_Basal2Apical); } else{ // ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, // Num_of_NodeType1_MotherCell, maxDivMembrNodeCount1); ptsBetween1 = obtainPtsBetween_cellCenterLine(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, cellCenterLine_Apical2Basal, cellCenterLine_Apical2Basal_rightShift, cellCenterLine_MirrorLength_Apical2Basal); } // std::cout<<"tmp1[tmp1.size() - 1] = "<<tmp1[tmp1.size() - 1].vec.x<<" "<<tmp1[tmp1.size()-1].vec.y<<std::endl; // std::cout<<"tmp1[0] = "<<tmp1[0].vec.x<<" "<<tmp1[0].vec.y<<std::endl; // ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, // memNewSpacing, maxDivMembrNodeCount1); // std::cout<<"sorted_tmp1[tmp1.size() - 1].val = "<<sorted_tmp1[sorted_tmp1.size()-1].val<<", .vec = "<< sorted_tmp1[sorted_tmp1.size() - 1].vec.x<<" "<<sorted_tmp1[sorted_tmp1.size()-1].vec.y<<std::endl; // std::cout<<"sorted_tmp1[0].val = "<<sorted_tmp1[0].val<<", .vec = "<<sorted_tmp1[0].vec.x<<" "<<sorted_tmp1[0].vec.y<<std::endl; // ptsBetween1 = obtainPtsBetween(sorted_tmp1[sorted_tmp1.size() - 1].vec, sorted_tmp1[0].vec, // memNewSpacing, maxDivMembrNodeCount1); } // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. 
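// Same guard as in processMemVec, but here the new membrane nodes come from
// obtainPtsBetween_cellCenterLine (see the calls above for tmp1 and below for tmp2):
// rather than interpolating on the straight segment between the two end nodes, the points are
// taken along the precomputed cell center line, with the per-sample left/right unit directions
// and mirror lengths passed in so the helper can offset the sampled points toward the
// appropriate side of the dividing cell. This is a reading of the argument list
//     (endNode, startNode, centerLine, shiftDirections, mirrorLengths)
// only; the helper itself is defined elsewhere.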
if (tmp2.size() >= 1) { if (divAuxData.isMotherCellBehind[i] == true){ // ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, // Num_of_NodeType1_DaughterCell, maxDivMembrNodeCount2); ptsBetween2 = obtainPtsBetween_cellCenterLine(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, cellCenterLine_Apical2Basal, cellCenterLine_Apical2Basal_rightShift, cellCenterLine_MirrorLength_Apical2Basal); } else{ // ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, // Num_of_NodeType0_DaughterCell, maxDivMembrNodeCount2); ptsBetween2 = obtainPtsBetween_cellCenterLine(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, cellCenterLine_Basal2Apical, cellCenterLine_Basal2Apical_leftShift, cellCenterLine_MirrorLength_Basal2Apical); } // std::cout<<"tmp2[tmp1.size() - 1] = "<<tmp2[tmp2.size() - 1].vec.x<<" "<<tmp2[tmp2.size()-1].vec.y<<std::endl; // std::cout<<"tmp2[0] = "<<tmp2[0].vec.x<<" "<<tmp2[0].vec.y<<std::endl; // ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, // memNewSpacing, maxDivMembrNodeCount2); // std::cout<<"sorted_tmp2[sorted_tmp2.size() - 1].val = "<<sorted_tmp2[sorted_tmp2.size()-1].val<<", .vec = "<<sorted_tmp2[sorted_tmp2.size() - 1].vec.x<<" "<<sorted_tmp2[sorted_tmp2.size()-1].vec.y<<std::endl; // std::cout<<"sorted_tmp2[0].val = "<<sorted_tmp2[0].val<<", vec = "<<sorted_tmp2[0].vec.x<<" "<<sorted_tmp2[0].vec.y<<std::endl; // ptsBetween2 = obtainPtsBetween(sorted_tmp2[sorted_tmp2.size() - 1].vec, sorted_tmp2[0].vec, // memNewSpacing, maxDivMembrNodeCount2); } for (uint j = 0; j < tmp1.size(); j++) { divAuxData.tmp1VecMem.push_back(tmp1[j].vec); divAuxData.tmp1VecMemNodeType.push_back(tmp1[j].type); } for (uint j = 0; j < tmp2.size(); j++) { divAuxData.tmp2VecMem.push_back(tmp2[j].vec); divAuxData.tmp2VecMemNodeType.push_back(tmp2[j].type); } std::cout<<"ptsBetween1 size = "<<ptsBetween1.size()<<std::endl; if (divAuxData.isMotherCellBehind[i] == false){ for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); // std::cout<<"ptsBtween1 "<<ptsBetween1[j].x<<" "<<ptsBetween1[j].y<<std::endl; divAuxData.tmp1VecMemNodeType.push_back(lateralB); } // std::cout<<"size of tmp1VecMemNodeType = "<<divAuxData.tmp1VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp1VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp1VecMem["<<j<<"] = "<<divAuxData.tmp1VecMem[j].x<<" "<<divAuxData.tmp1VecMem[j].y<<" "<<divAuxData.tmp1VecMem[j].z<<", Type = "<<divAuxData.tmp1VecMemNodeType[j]<<std::endl; // } } else if (divAuxData.isMotherCellBehind[i] == true){ for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); // std::cout<<"ptsBtween1 "<<ptsBetween1[j].x<<" "<<ptsBetween1[j].y<<std::endl; divAuxData.tmp1VecMemNodeType.push_back(lateralA); } std::cout<<"size of tmp1VecMemNodeType = "<<divAuxData.tmp1VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp1VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp1VecMem["<<j<<"] = "<<divAuxData.tmp1VecMem[j].x<<" "<<divAuxData.tmp1VecMem[j].y<<" "<<divAuxData.tmp1VecMem[j].z<<", Type = "<<divAuxData.tmp1VecMemNodeType[j]<<std::endl; // } } std::cout<<"ptsBetween2 size = "<<ptsBetween2.size()<<std::endl; if (divAuxData.isMotherCellBehind[i] == false){ for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); // std::cout<<"ptsBtween2 "<<ptsBetween2[j].x<<" "<<ptsBetween2[j].y<<std::endl; divAuxData.tmp2VecMemNodeType.push_back(lateralA); } // std::cout<<"size of 
tmp2VecMemNodeType = "<<divAuxData.tmp2VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp2VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp2VecMem["<<j<<"] = "<<divAuxData.tmp2VecMem[j].x<<" "<<divAuxData.tmp2VecMem[j].y<<" "<<divAuxData.tmp2VecMem[j].z<<", Type = "<<divAuxData.tmp2VecMemNodeType[j]<<std::endl; // } } else if (divAuxData.isMotherCellBehind[i] == true){ for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); // std::cout<<"ptsBtween2 "<<ptsBetween2[j].x<<" "<<ptsBetween2[j].y<<std::endl; divAuxData.tmp2VecMemNodeType.push_back(lateralB); } std::cout<<"size of tmp2VecMemNodeType = "<<divAuxData.tmp2VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp2VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp2VecMem["<<j<<"] = "<<divAuxData.tmp2VecMem[j].x<<" "<<divAuxData.tmp2VecMem[j].y<<" "<<divAuxData.tmp2VecMem[j].z<<", Type = "<<divAuxData.tmp2VecMemNodeType[j]<<std::endl; // } } //Here we will try to adjust the positions of newly added nodes (from ptsBetween1 and ptsBetween2) according to the 'center line' of the cell. assert(divAuxData.tmp1VecMem.size() <= membThreshold); assert(divAuxData.tmp2VecMem.size() <= membThreshold); } void SceCells::obtainMembrAndIntnlNodes(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes) { membrNodes.clear(); intnlNodes.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; double posY = divAuxData.tmpNodePosY_M[index]; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } } //Ali void SceCells::obtainMembrAndIntnlNodesPlusNodeType(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes, vector<MembraneType1> & nodeTypeIndxDiv) { membrNodes.clear(); intnlNodes.clear(); nodeTypeIndxDiv.clear() ; uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; double posY = divAuxData.tmpNodePosY_M[index]; MembraneType1 nodeTypeI = divAuxData.tmpNodeType[index]; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); nodeTypeIndxDiv.push_back(nodeTypeI) ; } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } } void SceCells::obtainMembrAndIntnlNodesPlusNodeType2(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes, vector<MembraneType1> & nodeTypeIndxDiv, vector<CVector>& cellCenterLine_Basal2Apical, vector<CVector>& cellCenterLine_Apical2Basal, vector<CVector>& cellCenterLine_Basal2Apical_leftShift, vector<CVector>& cellCenterLine_Basal2Apical_rightShift, vector<CVector>& cellCenterLine_Apical2Basal_leftShift, vector<CVector>& cellCenterLine_Apical2Basal_rightShift, vector<double>& cellCenterLine_MirrorLength_Basal2Apical, vector<double>& cellCenterLine_MirrorLength_Apical2Basal) { membrNodes.clear(); intnlNodes.clear(); nodeTypeIndxDiv.clear() ; cellCenterLine_Basal2Apical.clear(); 
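// The *_Basal2Apical / *_Apical2Basal vectors assembled below describe the cell "center line":
// for every lateral membrane node, the midpoint between that node and its adhered mirror node
// on the neighbouring cell (via tmpNodeMemMirrorIndex_M), together with the node-to-mirror
// distance and the unit directions from the midpoint toward each side. The Basal2Apical set is
// collected from lateralA nodes and the Apical2Basal set from lateralB nodes, so the two
// traversals run along the same line in opposite directions.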
cellCenterLine_Apical2Basal.clear(); cellCenterLine_Apical2Basal_leftShift.clear(); cellCenterLine_Apical2Basal_rightShift.clear(); cellCenterLine_Basal2Apical_leftShift.clear(); cellCenterLine_Basal2Apical_rightShift.clear(); cellCenterLine_MirrorLength_Basal2Apical.clear(); cellCenterLine_MirrorLength_Apical2Basal.clear(); vector<CVector> tmpCellCenterLine_Basal2Apical; vector<CVector> tmpCellCenterLine_Basal2Apical_leftShift; vector<CVector> tmpCellCenterLine_Basal2Apical_rightShift; vector<double> tmpCellCenterLine_MirrorLength_Basal2Apical; vector<CVector> tmpCellCenterLine_Apical2Basal; vector<CVector> tmpCellCenterLine_Apical2Basal_leftShift; vector<CVector> tmpCellCenterLine_Apical2Basal_rightShift; vector<double> tmpCellCenterLine_MirrorLength_Apical2Basal; tmpCellCenterLine_Basal2Apical.clear(); tmpCellCenterLine_Basal2Apical_leftShift.clear(); tmpCellCenterLine_Basal2Apical_rightShift.clear(); tmpCellCenterLine_MirrorLength_Basal2Apical.clear(); tmpCellCenterLine_Apical2Basal.clear(); tmpCellCenterLine_Apical2Basal_leftShift.clear(); tmpCellCenterLine_Apical2Basal_rightShift.clear(); tmpCellCenterLine_MirrorLength_Apical2Basal.clear(); bool firstloop_Basal2Apical = true; bool firstloop_Apical2Basal = true; bool lateralA_earlyShift = false; bool lateralB_earlyShift = false; uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index, nextIndex; int lateralACount = 0; int lateralBCount = 0; int initLateralACount = 0; int initLateralBCount = 0; std::cout<<"i = "<<i<<std::endl; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpNodeType[index]==lateralA){ initLateralACount+=1; } if (divAuxData.tmpNodeType[index]==lateralB){ initLateralBCount+=1; } } std::cout<<"initLateralACount = "<<initLateralACount<<std::endl; std::cout<<"initLateralBCount = "<<initLateralBCount<<std::endl; for (uint j = 0; j < maxAllNodePerCell; j++) { // std::cout<<"j = "<<j<<std::endl; index = i * maxAllNodePerCell + j; nextIndex = index + 1; if (nextIndex >= i*maxAllNodePerCell+maxAllNodePerCell){ nextIndex = i*maxAllNodePerCell + 0; } if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; // std::cout<<"posX = "<<posX<<std::endl; double posY = divAuxData.tmpNodePosY_M[index]; // std::cout<<"posY = "<<posY<<std::endl; MembraneType1 nodeTypeI = divAuxData.tmpNodeType[index]; // std::cout<<"nodetype = "<<nodeTypeI<<std::endl; // std::cout<<"nodeTypeI = "<<nodeTypeI<<std::endl; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); nodeTypeIndxDiv.push_back(nodeTypeI) ; CVector cellCenterLinePos; CVector cellCenterLinePos_leftUnitDir; CVector cellCenterLinePos_rightUnitDir; //Since we start by using nodes with label 'lateralA', the resulting cellCenterLinePos shoould orient from basal to apical. //But since mother and daughter cell will go through this center line in different orientation (even though both cells use //counterclockwise orientation). 
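// Sketch of the per-node computation carried out in the two branches below (lateralA shown;
// the left/right roles are swapped for lateralB):
//     mirrorIdx = divAuxData.tmpNodeMemMirrorIndex_M[index];          // adhered node on the neighbour
//     midPoint  = 0.5 * (thisNode + nodeLoc[mirrorIdx]);              // one center-line sample
//     length    = |thisNode - nodeLoc[mirrorIdx]|;                    // stored mirror length
//     rightDir  = (thisNode           - midPoint) / |thisNode           - midPoint|;
//     leftDir   = (nodeLoc[mirrorIdx] - midPoint) / |nodeLoc[mirrorIdx] - midPoint|;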
//Kevin if (nodeTypeI == lateralA){ // int mirrorNode = nodes->getInfoVecs().nodeAdhereIndex[j]; // std::cout<<"divAuxData.tmpNodeMemMirrorIndex_M size = "<<divAuxData.tmpNodeMemMirrorIndex_M.size()<<std::endl; cellCenterLinePos.x = (divAuxData.tmpNodePosX_M[index] + nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; cellCenterLinePos.y = (divAuxData.tmpNodePosY_M[index] + nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; // std::cout<<"cellCenterLinePos = "<<cellCenterLinePos.x<<" "<<cellCenterLinePos.y<<std::endl; double length = sqrt((divAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]])* (divAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]]) + (divAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])* (divAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])); cellCenterLinePos_rightUnitDir.x = divAuxData.tmpNodePosX_M[index] - cellCenterLinePos.x; // std::cout<<"tmpNodePosX = "<<divAuxData.tmpNodePosX_M[index]<<" , cellCenterLinePos.x = "<<cellCenterLinePos.x<<std::endl; cellCenterLinePos_rightUnitDir.y = divAuxData.tmpNodePosY_M[index] - cellCenterLinePos.y; // std::cout<<"tmpNodePosY = "<<divAuxData.tmpNodePosY_M[index]<<" , cellCenterLinePos.y = "<<cellCenterLinePos.y<<std::endl; double rightLength = sqrt(cellCenterLinePos_rightUnitDir.x*cellCenterLinePos_rightUnitDir.x + cellCenterLinePos_rightUnitDir.y*cellCenterLinePos_rightUnitDir.y); // std::cout<<"rightLength = "<<rightLength<<std::endl; cellCenterLinePos_rightUnitDir.x = cellCenterLinePos_rightUnitDir.x/rightLength; cellCenterLinePos_rightUnitDir.y = cellCenterLinePos_rightUnitDir.y/rightLength; cellCenterLinePos_leftUnitDir.x = nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]] - cellCenterLinePos.x; // std::cout<<"nodeLocX[tmpNodeMirrorIndex] = "<<nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]]<<" , cellCenterLinePos.x = "<<cellCenterLinePos.x<<std::endl; cellCenterLinePos_leftUnitDir.y = nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]] - cellCenterLinePos.y; // std::cout<<"nodeLocY[tmpNodeMirrorIndex] = "<<nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]]<<" , cellCenterLinePos.y = "<<cellCenterLinePos.y<<std::endl; double leftLength = sqrt(cellCenterLinePos_leftUnitDir.x*cellCenterLinePos_leftUnitDir.x + cellCenterLinePos_leftUnitDir.y*cellCenterLinePos_leftUnitDir.y); // std::cout<<"leftLength = "<<leftLength<<std::endl; cellCenterLinePos_leftUnitDir.x = cellCenterLinePos_leftUnitDir.x/leftLength; cellCenterLinePos_leftUnitDir.y = cellCenterLinePos_leftUnitDir.y/leftLength; cellCenterLine_Basal2Apical.push_back(cellCenterLinePos); lateralACount+=1; cellCenterLine_Basal2Apical_leftShift.push_back(cellCenterLinePos_leftUnitDir); // std::cout<<"cellCenterLine Basal2Apical left shift: "<<cellCenterLinePos_leftUnitDir.x<<" "<<cellCenterLinePos_leftUnitDir.y<<std::endl; cellCenterLine_Basal2Apical_rightShift.push_back(cellCenterLinePos_rightUnitDir); // std::cout<<"cellCenterLine Basal2Apical right shift: "<<cellCenterLinePos_rightUnitDir.x<<" "<<cellCenterLinePos_rightUnitDir.y<<std::endl; cellCenterLine_MirrorLength_Basal2Apical.push_back(length); if (firstloop_Basal2Apical == true){ tmpCellCenterLine_Basal2Apical.push_back(cellCenterLinePos); 
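// Bookkeeping note: tmpCellCenterLine_Basal2Apical* keeps a copy of the lateralA samples seen
// before the first lateralA -> non-lateralA transition. If, at that transition, not all
// lateralA nodes have been covered (lateralACount != initLateralACount), the loop started in
// the middle of the lateralA stretch: the main vectors are cleared and refilled from the
// remaining lateralA nodes, and the samples saved here are appended after the loop
// (lateralA_earlyShift) so the center line stays contiguous and ordered basal-to-apical.
// The lateralB branch mirrors this for the Apical2Basal vectors.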
tmpCellCenterLine_Basal2Apical_leftShift.push_back(cellCenterLinePos_leftUnitDir); tmpCellCenterLine_Basal2Apical_rightShift.push_back(cellCenterLinePos_rightUnitDir); tmpCellCenterLine_MirrorLength_Basal2Apical.push_back(length); } if (nodeTypeI == lateralA && divAuxData.tmpNodeType[nextIndex] != lateralA){ // std::cout<<"Node type shift away from lateralA."<<std::endl; //This means that we hit a transition point from lateralA to apical1. Need to make sure if we have covered all lateralA nodes or not. //If not, this means that we started counting from the middle of lateralA nodes, which will causes serious problem later. //Hence we will try to remedy this issue here //Kevin if (lateralACount != initLateralACount){ lateralA_earlyShift = true; // std::cout<<"Node type shift away from lateralA before covering all lateralA type node."<<std::endl; cellCenterLine_Basal2Apical.clear(); cellCenterLine_Basal2Apical_leftShift.clear(); cellCenterLine_Basal2Apical_rightShift.clear(); cellCenterLine_MirrorLength_Basal2Apical.clear(); } firstloop_Basal2Apical = false; // std::cout<<"firstloop_Basal2Apical = "<<firstloop_Basal2Apical<<std::endl; } } if (nodeTypeI == lateralB){ // int mirrorNode = nodes->getInfoVecs().nodeAdhereIndex[j]; cellCenterLinePos.x = (divAuxData.tmpNodePosX_M[index] + nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; cellCenterLinePos.y = (divAuxData.tmpNodePosY_M[index] + nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; double length = sqrt((divAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]])* (divAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]]) + (divAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])* (divAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])); cellCenterLinePos_leftUnitDir.x = divAuxData.tmpNodePosX_M[index] - cellCenterLinePos.x; cellCenterLinePos_leftUnitDir.y = divAuxData.tmpNodePosY_M[index] - cellCenterLinePos.y; double leftLength = sqrt(cellCenterLinePos_leftUnitDir.x*cellCenterLinePos_leftUnitDir.x + cellCenterLinePos_leftUnitDir.y*cellCenterLinePos_leftUnitDir.y); cellCenterLinePos_leftUnitDir.x = cellCenterLinePos_leftUnitDir.x/leftLength; cellCenterLinePos_leftUnitDir.y = cellCenterLinePos_leftUnitDir.y/leftLength; cellCenterLinePos_rightUnitDir.x = nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]] - cellCenterLinePos.x; cellCenterLinePos_rightUnitDir.y = nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]] - cellCenterLinePos.y; double rightLength = sqrt(cellCenterLinePos_rightUnitDir.x*cellCenterLinePos_rightUnitDir.x + cellCenterLinePos_rightUnitDir.y*cellCenterLinePos_rightUnitDir.y); cellCenterLinePos_rightUnitDir.x = cellCenterLinePos_rightUnitDir.x/rightLength; cellCenterLinePos_rightUnitDir.y = cellCenterLinePos_rightUnitDir.y/rightLength; cellCenterLine_Apical2Basal.push_back(cellCenterLinePos); lateralBCount+=1; cellCenterLine_Apical2Basal_leftShift.push_back(cellCenterLinePos_leftUnitDir); // std::cout<<"cellCenterLine Apical2Basal left shift: "<<cellCenterLinePos_leftUnitDir.x<<" "<<cellCenterLinePos_leftUnitDir.y<<std::endl; cellCenterLine_Apical2Basal_rightShift.push_back(cellCenterLinePos_rightUnitDir); // std::cout<<"cellCenterLine Apical2Basal right shift: "<<cellCenterLinePos_rightUnitDir.x<<" 
"<<cellCenterLinePos_rightUnitDir.y<<std::endl; cellCenterLine_MirrorLength_Apical2Basal.push_back(length); if (firstloop_Apical2Basal == true){ tmpCellCenterLine_Apical2Basal.push_back(cellCenterLinePos); tmpCellCenterLine_Apical2Basal_leftShift.push_back(cellCenterLinePos_leftUnitDir); tmpCellCenterLine_Apical2Basal_rightShift.push_back(cellCenterLinePos_rightUnitDir); tmpCellCenterLine_MirrorLength_Apical2Basal.push_back(length); } if (nodeTypeI == lateralB && divAuxData.tmpNodeType[nextIndex] != lateralB){ // std::cout<<"Node type shift away from lateralB."<<std::endl; //This means that we hit a transition point from lateralA to apical1. Need to make sure if we have covered all lateralA nodes or not. //If not, this means that we started counting from the middle of lateralA nodes, which will causes serious problem later. //Hence we will try to remedy this issue here //Kevin if (lateralBCount != initLateralBCount){ lateralB_earlyShift = true; // std::cout<<"Node type shift away from lateralB before covering all lateralB type node."<<std::endl; cellCenterLine_Apical2Basal.clear(); cellCenterLine_Apical2Basal_leftShift.clear(); cellCenterLine_Apical2Basal_rightShift.clear(); cellCenterLine_MirrorLength_Apical2Basal.clear(); } firstloop_Apical2Basal = false; // std::cout<<"firstloop_Apical2Basal = "<<firstloop_Apical2Basal<<std::endl; } } } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } if (lateralA_earlyShift == true){ for (int p = 0; p < tmpCellCenterLine_Basal2Apical.size(); p++){ cellCenterLine_Basal2Apical.push_back(tmpCellCenterLine_Basal2Apical[p]); cellCenterLine_Basal2Apical_leftShift.push_back(tmpCellCenterLine_Basal2Apical_leftShift[p]); cellCenterLine_Basal2Apical_rightShift.push_back(tmpCellCenterLine_Basal2Apical_rightShift[p]); cellCenterLine_MirrorLength_Basal2Apical.push_back(tmpCellCenterLine_MirrorLength_Basal2Apical[p]); } } // std::cout<<"cellCenterLine_Basal2Apical size = "<<cellCenterLine_Basal2Apical.size()<<std::endl; // std::cout<<"cellCenterLine_Basal2Apical_leftShift size = "<<cellCenterLine_Basal2Apical_leftShift.size()<<std::endl; // std::cout<<"cellCenterLine_Basal2Apical_rightShift size = "<<cellCenterLine_Basal2Apical_rightShift.size()<<std::endl; // std::cout<<"cellCenterLine_MirrorLength_Basal2Apical size = "<<cellCenterLine_MirrorLength_Basal2Apical.size()<<std::endl; if (lateralB_earlyShift == true){ for (int p = 0; p < tmpCellCenterLine_Apical2Basal.size(); p++){ cellCenterLine_Apical2Basal.push_back(tmpCellCenterLine_Apical2Basal[p]); cellCenterLine_Apical2Basal_leftShift.push_back(tmpCellCenterLine_Apical2Basal_leftShift[p]); cellCenterLine_Apical2Basal_rightShift.push_back(tmpCellCenterLine_Apical2Basal_rightShift[p]); cellCenterLine_MirrorLength_Apical2Basal.push_back(tmpCellCenterLine_MirrorLength_Apical2Basal[p]); } } // std::cout<<"cellCenterLine_Apical2Basal size = "<<cellCenterLine_Apical2Basal.size()<<std::endl; // std::cout<<"cellCenterLine_Apical2Basal_leftShift size = "<<cellCenterLine_Apical2Basal_leftShift.size()<<std::endl; // std::cout<<"cellCenterLine_Apical2Basal_rightShift size = "<<cellCenterLine_Apical2Basal_rightShift.size()<<std::endl; // std::cout<<"cellCenterLine_MirrorLength_Apical2Basal size = "<<cellCenterLine_MirrorLength_Apical2Basal.size()<<std::endl; //Now we check to see if the cellCenterLine data structure is built correctly. The two should be identical except with opposite ordering. 
} void SceCells::obtainMembrAndIntnlNodesPlusNodeType2_printingOnly(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes, vector<MembraneType1> & nodeTypeIndxDiv, vector<CVector>& cellCenterLine_Basal2Apical, vector<CVector>& cellCenterLine_Apical2Basal) { membrNodes.clear(); intnlNodes.clear(); nodeTypeIndxDiv.clear() ; cellCenterLine_Basal2Apical.clear(); cellCenterLine_Apical2Basal.clear(); vector<CVector> tmpCellCenterLine_Basal2Apical; vector<CVector> tmpCellCenterLine_Apical2Basal; tmpCellCenterLine_Basal2Apical.clear(); tmpCellCenterLine_Apical2Basal.clear(); bool firstloop_Basal2Apical = true; bool firstloop_Apical2Basal = true; bool lateralA_earlyShift = false; bool lateralB_earlyShift = false; uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index, nextIndex; int lateralACount = 0; int lateralBCount = 0; int initLateralACount = 0; int initLateralBCount = 0; std::cout<<"i = "<<i<<std::endl; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (printAuxData.tmpNodeType[index]==lateralA){ initLateralACount+=1; } if (printAuxData.tmpNodeType[index]==lateralB){ initLateralBCount+=1; } } std::cout<<"initLateralACount_printOnly = "<<initLateralACount<<std::endl; std::cout<<"initLateralBCount_printOnly = "<<initLateralBCount<<std::endl; for (uint j = 0; j < maxAllNodePerCell; j++) { // std::cout<<"j = "<<j<<std::endl; index = i * maxAllNodePerCell + j; nextIndex = index + 1; if (nextIndex >= i*maxAllNodePerCell+maxAllNodePerCell){ nextIndex = i*maxAllNodePerCell + 0; } if (printAuxData.tmpIsActive_M[index] != true) { continue; } // std::cout<<"debug 1"<<std::endl; double posX = printAuxData.tmpNodePosX_M[index]; double posY = printAuxData.tmpNodePosY_M[index]; MembraneType1 nodeTypeI = printAuxData.tmpNodeType[index]; // std::cout<<"debug 2"<<std::endl; if (j < membThreshold) { CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); nodeTypeIndxDiv.push_back(nodeTypeI) ; CVector cellCenterLinePos; CVector cellCenterLinePos_leftUnitDir; CVector cellCenterLinePos_rightUnitDir; if (nodeTypeI == lateralA){ cellCenterLinePos.x = (printAuxData.tmpNodePosX_M[index] + nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; cellCenterLinePos.y = (printAuxData.tmpNodePosY_M[index] + nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; double length = sqrt((printAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]])* (printAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]]) + (printAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])* (printAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])); cellCenterLine_Basal2Apical.push_back(cellCenterLinePos); lateralACount+=1; if (firstloop_Basal2Apical == true){ tmpCellCenterLine_Basal2Apical.push_back(cellCenterLinePos); } if (nodeTypeI == lateralA && printAuxData.tmpNodeType[nextIndex] != lateralA){ if (lateralACount != initLateralACount){ lateralA_earlyShift = true; cellCenterLine_Basal2Apical.clear(); } firstloop_Basal2Apical = false; } } if (nodeTypeI == lateralB){ cellCenterLinePos.x = (printAuxData.tmpNodePosX_M[index] + nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; cellCenterLinePos.y = (printAuxData.tmpNodePosY_M[index] + 
nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; double length = sqrt((printAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]])* (printAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]]) + (printAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])* (printAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])); cellCenterLine_Apical2Basal.push_back(cellCenterLinePos); lateralBCount+=1; if (firstloop_Apical2Basal == true){ tmpCellCenterLine_Apical2Basal.push_back(cellCenterLinePos); } if (nodeTypeI == lateralB && printAuxData.tmpNodeType[nextIndex] != lateralB){ if (lateralBCount != initLateralBCount){ lateralB_earlyShift = true; cellCenterLine_Apical2Basal.clear(); } firstloop_Apical2Basal = false; } } } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } // std::cout<<"debug 3"<<std::endl; } if (lateralA_earlyShift == true){ for (int p = 0; p < tmpCellCenterLine_Basal2Apical.size(); p++){ cellCenterLine_Basal2Apical.push_back(tmpCellCenterLine_Basal2Apical[p]); } } if (lateralB_earlyShift == true){ for (int p = 0; p < tmpCellCenterLine_Apical2Basal.size(); p++){ cellCenterLine_Apical2Basal.push_back(tmpCellCenterLine_Apical2Basal[p]); } } } /* Ali CVector SceCells::obtainCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } */ CVector SceCells::obtainCellCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } CVector SceCells::obtainNucleusCenter(uint i, vector<CVector> IntnlNodes){ for (int j = 0; j < IntnlNodes.size(); j++){ divAuxData.tmpNucleusCenterPosX_M[i] += IntnlNodes[j].x; divAuxData.tmpNucleusCenterPosY_M[i] += IntnlNodes[j].y; } double oldCenterX = divAuxData.tmpNucleusCenterPosX_M[i]/IntnlNodes.size(); double oldCenterY = divAuxData.tmpNucleusCenterPosY_M[i]/IntnlNodes.size(); CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } //Kevin CVector SceCells::obtainIntCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } /* CVector SceCells::calDivDir_MajorAxis(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis) { // not the optimal algorithm but easy to code double maxDiff = 0; CVector majorAxisDir; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; if (diff > maxDiff) { maxDiff = diff; majorAxisDir = tmpUnitDir; } } lenAlongMajorAxis = maxDiff; return majorAxisDir; } */ CVector SceCells::calDivDir_MajorAxis(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis) { // not the optimal algorithm but easy to code double minDiff = 10000; CVector minorAxisDir; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = 
membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; if (diff < minDiff) { minDiff = diff; minorAxisDir = tmpUnitDir; } } lenAlongMajorAxis = minDiff; return minorAxisDir; } CVector SceCells::calDivDir_ApicalBasal(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis, vector<MembraneType1> & nodeTypeIndxDiv) { // not the optimal algorithm but easy to code double minDiff = 10000; CVector minorAxisDir; int minPointAdhIndex ; int maxPointAdhIndex; //for (uint i = 0; i < membrNodes.size(); i++) { // cout <<"adhesion index for dividing cell node"<<i<<"is" << adhIndxDiv[i] <<endl; // } //return 0 ; for (uint i = 0; i < membrNodes.size(); i++) { if ( (nodeTypeIndxDiv[i]!=lateralA) && (nodeTypeIndxDiv[i]!=lateralB) ) { continue ; } CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; //distance finder for node i to the opposite nodes //Ali for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; // minimum distance finder for each cells to be used for cell center shifting. It should also need to be a node that have neighbor if (diff < minDiff ) { minDiff = diff; minorAxisDir = tmpUnitDir; // adhesionIndexFinal=adhIndxDiv[i]; } } lenAlongMajorAxis = minDiff; return minorAxisDir; } std::pair <int ,int> SceCells::calApicalBasalRingIds(CVector divDir, CVector center,vector<CVector>& membrNodes, vector<MembraneType1> & nodeTypeIndxDiv) { int idMin, idMax ; CVector splitDir = divDir.rotateNintyDeg_XY_CC(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; CVector tmpUnitDir2 = tmpDir2.getUnitVector(); double tmpVecProduct = splitDir * tmpUnitDir2; if (tmpVecProduct < min) { min = tmpVecProduct; idMin=j ; } if (tmpVecProduct > max) { max = tmpVecProduct; idMax=j ; } } cout << " contractile node location is " << membrNodes[idMin].x << " ," << membrNodes[idMin].y << endl ; cout << " contractile node location is " << membrNodes[idMax].x << " ," << membrNodes[idMax].y << endl ; if (nodeTypeIndxDiv[idMin]==apical1) { return make_pair(idMin,idMax) ; } else { return make_pair(idMax,idMin) ; } } //A&A double SceCells::calLengthAlongHertwigAxis(CVector divDir, CVector cellCenter, vector<CVector>& membrNodes) { CVector divDirUnit = divDir.getUnitVector(); double minUnit = 0, maxUnit = 0; double minOveral = 0, maxOveral = 0; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - cellCenter; //it is cell center CVector tmpUnitDir = tmpDir.getUnitVector(); double tmpVecProductUnit = divDirUnit * tmpUnitDir; double tmpVecProductOveral = divDirUnit * tmpDir; if (tmpVecProductUnit < minUnit) { minUnit = tmpVecProductUnit; minOveral = tmpVecProductOveral; } if (tmpVecProductUnit > maxUnit) { maxUnit = tmpVecProductUnit; maxOveral = tmpVecProductOveral; } } double lenAlongHertwigAxis = maxOveral - minOveral; return lenAlongHertwigAxis; // it is minor axis } void SceCells::obtainTwoNewIntCenters(CVector& oldIntCenter, CVector& divDir, double len_MajorAxis, CVector& 
intCenterNew1, CVector& intCenterNew2) { CVector divDirUnit = divDir.getUnitVector(); std::cout<<"divDirUnit = "<<divDirUnit.x<<" "<<divDirUnit.y<<std::endl; double lenChange = len_MajorAxis / 2.0 * centerShiftRatio; // this means small axis intCenterNew1 = oldIntCenter + lenChange * divDirUnit; // it should be nucleus center intCenterNew2 = oldIntCenter - lenChange * divDirUnit; // it should be nulceus center CVector centerTissue ; //Ali centerTissue=CVector (40.0, 40.0, 0.0) ; //Ali should be imported CVector tmpVec1=intCenterNew1-centerTissue ; //Ali // assuming New1 is mother cell CVector tmpVec2=intCenterNew2-centerTissue ; //Ali CVector tmpDiff=tmpVec2-tmpVec1 ; CVector tmpCross=Cross(tmpVec1,tmpVec2) ; //Ali bool isMotherCellBehindInt=false ; //Ali // assuming CCW is the initial order of cell ranks //if (tmpCross.z>0){ // if (tmpDiff.x>0){ isMotherCellBehindInt=true ; // } //Ali std::cout<<"isMotherCellBehindInt = "<<isMotherCellBehindInt<<std::endl; divAuxData.isMotherCellBehind.push_back(isMotherCellBehindInt) ; } void SceCells::prepareTmpVec(uint i, CVector divDir, CVector oldCellCenter,CVector oldIntCenter ,std::vector<VecValT>& tmp1, std::vector<VecValT>& tmp2) { tmp1.clear(); // is for membrane node of first cell tmp2.clear(); // is for membrane node of the second cell uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; VecValT tmpData; CVector splitDir = divDir.rotateNintyDeg_XY_CC(); std::cout<<"splitDir = "<<splitDir.x<<" "<<splitDir.y<<std::endl; std::vector<double> oldCellCenter2BasalVec(2,0.0); std::vector<double> oldCellCenter2ApicalVec(2,0.0); oldCellCenter2BasalVec[0] = divAuxData.tmpBasalLoc[0] - oldCellCenter.x; oldCellCenter2BasalVec[1] = divAuxData.tmpBasalLoc[1] - oldCellCenter.y; oldCellCenter2ApicalVec[0] = divAuxData.tmpApicalLoc[0] - oldCellCenter.x; oldCellCenter2ApicalVec[1] = divAuxData.tmpApicalLoc[1] - oldCellCenter.y; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (j < membThreshold) { // means node type is membrane if (divAuxData.tmpIsActive_M[index] == true) { CVector memPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = memPos - oldCellCenter; CVector centerToPosUnit = centerToPosDir.getUnitVector(); double dotProduct = centerToPosUnit * splitDir; tmpData.val = dotProduct; tmpData.vec = memPos; tmpData.type=divAuxData.tmpNodeType[index] ; if (divAuxData.tmpNodeType[index] == lateralA){ tmp2.push_back(tmpData); } else if (divAuxData.tmpNodeType[index] == lateralB){ tmp1.push_back(tmpData); } else if (divAuxData.tmpNodeType[index] == apical1){ double crossProduct_withApical = centerToPosDir.x*oldCellCenter2ApicalVec[1] - centerToPosDir.y*oldCellCenter2ApicalVec[0]; // double crossProduct_withBasal = centerToPosDir.x*oldCellCenter2BasalVec[1] - centerToPosDir.y*oldCellCenter2BasalVec[0]; if (crossProduct_withApical >= 0){ tmp2.push_back(tmpData); } else if (crossProduct_withApical < 0){ tmp1.push_back(tmpData); } else {std::cout<<"None of the condition to determine if an apical node belongs to daughter cell or mother cell is met! 
Something is wrong!"<<std::endl;} } else if (divAuxData.tmpNodeType[index] == basal1){ // double crossProduct_withApical = centerToPosDir.x*oldCellCenter2ApicalVec[1] - centerToPosDir.y*oldCellCenter2ApicalVec[0] ; double crossProduct_withBasal = centerToPosDir.x*oldCellCenter2BasalVec[1] - centerToPosDir.y*oldCellCenter2BasalVec[0]; if (crossProduct_withBasal < 0){ tmp2.push_back(tmpData); } else if (crossProduct_withBasal >= 0){ tmp1.push_back(tmpData); } else {std::cout<<"None of the condition to determine if a basal node belongs to daughter cell or mother cell is met! Something is wrong!"<<std::endl;} } else{ std::cout<<"Active membrane node not assigned with any node type is present! Something is wrong!"<<std::endl; } /*CVector memPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = memPos - oldCellCenter; // Ali it should be center of cells CVector centerToPosUnit = centerToPosDir.getUnitVector(); CVector crossProduct = Cross(centerToPosDir, splitDir); double dotProduct = centerToPosUnit * splitDir;//This is the original way to determine which node belongs to which node belongs to mother and daughter cell // double dotProduct = centerToPosUnit.getUnitVector() * splitDir.getUnitVector(); // double dotProduct2 = centerToPosDir * divDir; tmpData.val = dotProduct; // for sorting the membrane nodes tmpData.vec = memPos; tmpData.type=divAuxData.tmpNodeType[index] ; if (crossProduct.z >= 0) { // counter-cloce wise tmp1.push_back(tmpData); } else { // cloce wise tmp2.push_back(tmpData); }*/ //This is the original way to determine which node belongs to which node belongs to mother and daughter cell //Kevin // if (dotProduct2 >= 0){ // tmp1.push_back(tmpData); // } // else{ // tmp2.push_back(tmpData); // } } } else {// shrink the internal nodes around the internal node center if (divAuxData.tmpIsActive_M[index] == true) { CVector internalPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = internalPos - oldIntCenter; // center of nucleus is more biological CVector shrinkedPos = centerToPosDir * shrinkRatio + oldIntCenter; // CVector unitDivDir = divDir.getUnitVector(); // Ali // double AmpTanget=centerToPosDir*unitDivDir ; // Ali dot product of two vectors // double shrinkedAmpTanget=shrinkRatio*AmpTanget; // multiply two doubles //Ali // CVector TangetVShrink=unitDivDir*shrinkedAmpTanget; // shrink the tanget component //Ali // CVector TangetV= unitDivDir* AmpTanget; // get the tanget component to compute the normal vector //Ali // CVector NormV=centerToPosDir-TangetV ; // compute the normal vector Ali // CVector polarShrinkedPos=NormV+TangetVShrink ; // summation of shrinked tanget and as previous vector in the normal direction to division axis//Ali // CVector updatedV=polarShrinkedPos+oldCenter ; //Ali double dotProduct = centerToPosDir * divDir; //double dotProduct = polarShrinkedPos * divDir; //Ali if (dotProduct >= 0) { if (divAuxData.tmp1IntnlVec.size() >= 24){ divAuxData.tmp2IntnlVec.push_back(shrinkedPos); } else{ divAuxData.tmp1IntnlVec.push_back(shrinkedPos); } } else { if (divAuxData.tmp2IntnlVec.size() >= 24){ divAuxData.tmp1IntnlVec.push_back(shrinkedPos); } else{ divAuxData.tmp2IntnlVec.push_back(shrinkedPos); } } } } } for (int i = 0; i < tmp1.size(); i++){ // std::cout<<"tmp1 ["<<i<<"] "<<tmp1[i].vec.x<<" "<<tmp1[i].vec.y<<" "<<tmp1[i].val<<" "<<tmp1[i].type<< std::endl; } for (int i = 0; i < tmp2.size(); i++){ // std::cout<<"tmp2 ["<<i<<"] "<<tmp2[i].vec.x<<" "<<tmp2[i].vec.y<<" 
"<<tmp2[i].val<<" "<<tmp2[i].type<< std::endl; } int targetId_tmp2; for (int i = 0; i < tmp2.size(); i++){ int iNext = i+1; if (i == (tmp2.size()-1)){ iNext = 0; } // std::cout<<"tmp2 i = "<<i<<std::endl; // std::cout<<"temp2 iNext = "<<iNext<<std::endl; double dotProduct = ((tmp2[i].vec.x - oldCellCenter.x) * (tmp2[iNext].vec.x - oldCellCenter.x)) + (tmp2[i].vec.y - oldCellCenter.y) * (tmp2[iNext].vec.y - oldCellCenter.y) ; if (dotProduct < 0){ targetId_tmp2 = i; std::cout<<"i for dotProduct < 0, tmp2 = "<<i<<std::endl; } } // std::cout<<"tmp2 before rearranging"<<std::endl; // for (int i = 0; i < tmp2.size(); i++){ // std::cout<<tmp2[i].vec.x<<" "<<tmp2[i].vec.y<<std::endl; // } if (targetId_tmp2 != (tmp2.size()-1)){ vector<VecValT> tmp_tmp2; int currentId = targetId_tmp2+1; for (int i = 0; i<tmp2.size(); i++){ tmp_tmp2.push_back(tmp2[currentId]); currentId += 1; if (currentId >= tmp2.size()){ currentId = currentId - tmp2.size(); } } tmp2.clear(); for (int i = 0; i < tmp_tmp2.size(); i++){ tmp2.push_back(tmp_tmp2[i]); } } else{ std::cout<<"No need to reorganize the order of tmp2Membr before adding new nodes"<<std::endl; } int targetId_tmp1; for (int i = 0; i < tmp1.size(); i++){ int iNext = i+1; if (i == (tmp1.size()-1)){ iNext = 0; } double dotProduct = (tmp1[i].vec.x - oldCellCenter.x) * (tmp1[iNext].vec.x - oldCellCenter.x) + (tmp1[i].vec.y - oldCellCenter.y) * (tmp1[iNext].vec.y - oldCellCenter.y) ; if (dotProduct < 0){ targetId_tmp1 = i; std::cout<<"i for dotProduct < 0, tmp1 = "<<i<<std::endl; } } if (targetId_tmp1 != (tmp1.size()-1)){ vector<VecValT> tmp_tmp1; int currentId = targetId_tmp1+1; for (int i = 0; i<tmp1.size(); i++){ tmp_tmp1.push_back(tmp1[currentId]); currentId += 1; if (currentId >= tmp1.size()){ currentId = currentId - tmp1.size(); } } tmp1.clear(); for (int i = 0; i < tmp_tmp1.size(); i++){ tmp1.push_back(tmp_tmp1[i]); } } else{ std::cout<<"No need to reorganize the order of tmp1Membr before adding new nodes"<<std::endl; } // oldCellCenter2BasalVec[0] = divAuxData.tmpBasalLoc[0] - oldCellCenter.x; // oldCellCenter2BasalVec[1] = divAuxData.tmpBasalLoc[1] - oldCellCenter.y; // double dotProduct_withBasal = -10000.0; // int targetId_tmp2 = -100; // for (int i = 0; i < tmp2.size(); i++){ // CVector tmp2_vec = tmp2[i].vec - oldCellCenter; // double tmp2_dotProduct_withBasal = tmp2_vec.x*oldCellCenter2BasalVec[0] + tmp2_vec.y*oldCellCenter2BasalVec[1]; // if (tmp2_dotProduct_withBasal >= dotProduct_withBasal){ // dotProduct_withBasal = tmp2_dotProduct_withBasal; // targetId_tmp2 = i; // } // } // vector<VecValT> tmp_tmp2; // int currentId = targetId_tmp2; // for (int i = 0; i<tmp2.size(); i++){ // tmp_tmp2.push_back(tmp2[currentId]); // currentId += 1; // if (currentId >= tmp2.size()){ // currentId = currentId - tmp2.size(); // } // } // tmp2.clear(); // for (int i = 0; i < tmp_tmp2.size(); i++){ // tmp2.push_back(tmp_tmp2[i]); // } } void SceCells::calCellArea() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, 
DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalTriArea(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellAreaVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } //AAMIRI added to calculate Perimeter of each cell void SceCells::calCellPerim() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalPerim(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellPerimVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } //Ali added to calculate pressure of each cell void SceCells::calCellPressure() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxNPerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0) ; // if the membrane node is not active or if this is an internal node it will automatically add with zero. 
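// --- added commentary (not original code) on the pressure estimate below ---
// The two reduce_by_key calls sum, per cell, the membrane force magnitudes
// nodeF_MI_M_N and the Lagrangian normal forces lagrangeFN over all nodes of
// the cell (inactive membrane nodes and internal nodes contribute zero, as
// noted above). The two thrust::transform calls then form
//     cellPressure = (sumF_MI_M_N + sumLagrangeFN) / cellPerimVec,
// i.e. total normal force per unit membrane length, so calCellPerim() is
// expected to have been run with up-to-date node positions beforehand (as is
// done in outputPolyCountData). For example, sums of 2.0 and 1.0 with a
// perimeter of 6.0 would give a pressure of 0.5.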
thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))+ totalNodeCountForActiveCells, nodes->getInfoVecs().nodeF_MI_M_N.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.sumF_MI_M_N.begin()); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))+ totalNodeCountForActiveCells, nodes->getInfoVecs().lagrangeFN.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.sumLagrangeFN.begin()); thrust:: transform(cellInfoVecs.sumF_MI_M_N.begin(), cellInfoVecs.sumF_MI_M_N.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.sumLagrangeFN.begin(), cellInfoVecs.cellPressure.begin(),thrust::plus<float>()) ; thrust:: transform(cellInfoVecs.cellPressure.begin(), cellInfoVecs.cellPressure.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.cellPerimVec.begin(), cellInfoVecs.cellPressure.begin(),thrust::divides<float>()) ; } CellsStatsData SceCells::outputPolyCountData() { cout << " I am at begining of outpolycount"<< std::flush ; std::cout.flush(); double sumX,sumY,cntr_X_Domain,cntr_Y_Domain ; int BdryApproach ; BdryApproach=1 ; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; cout << " I am before cells area"<< endl ; calCellArea(); for (int k = 0; k < cellInfoVecs.cellAreaVec.size(); k++){ if (cellInfoVecs.cellAreaVec[k] < 0){ cellInfoVecs.cellAreaVec[k] = -1.0*cellInfoVecs.cellAreaVec[k]; } } // !!!!!NOTE!!!!!! this is currently an ad hoc way to make sure area of each triangle is positive. cout << " I am after cells area" << endl ; calCellPerim();//AAMIRI calCellPressure() ; // Ali //computeBasalLoc(); //Ali we call it here to compute the length of the cells CellsStatsData result; cout << " I am after result" << endl ; uint bdryCriteria = globalConfigVars.getConfigValue("BdryCellCriteria").toInt(); // already on host; no need to call thrust::copy // thrust::host_vector<int> adhIndxHost = // nodes->getInfoVecs().nodeAdhIndxHostCopy; thrust::host_vector<int> adhIndxHost = nodes->getInfoVecs().nodeAdhereIndex; thrust::host_vector<double> growthProVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, growthProVecHost.begin()); thrust::host_vector<double> growthProMembrVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin() + allocPara_m.currentActiveCellCount, growthProMembrVecHost.begin()); thrust::host_vector<uint> activeMembrNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeMembrNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeMembrNodeCountHost.begin()); thrust::host_vector<uint> activeIntnlNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeIntnlNodeCountHost.begin()); ////////////// thrust::host_vector<double> centerCoordXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> centerCoordYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount, centerCoordXHost.begin()); 
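// (added note: centerCoordY is mirrored to the host next; every host vector in
//  this block is sized to allocPara_m.currentActiveCellCount and is consumed
//  only by the per-cell statistics loop further below)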
thrust::copy(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount, centerCoordYHost.begin()); ///////////// // std::cout<<"ISOLATE 1"<<std::endl; /////////////// thrust::host_vector<double> apicalLocXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> apicalLocYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocX.begin() + allocPara_m.currentActiveCellCount, apicalLocXHost.begin()); thrust::copy(cellInfoVecs.apicalLocY.begin(), cellInfoVecs.apicalLocY.begin() + allocPara_m.currentActiveCellCount, apicalLocYHost.begin()); /////////// // std::cout<<"ISOLATE 2"<<std::endl; /////////////// thrust::host_vector<double> basalLocXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> basalLocYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocX.begin() + allocPara_m.currentActiveCellCount, basalLocXHost.begin()); thrust::copy(cellInfoVecs.basalLocY.begin(), cellInfoVecs.basalLocY.begin() + allocPara_m.currentActiveCellCount, basalLocYHost.begin()); /////////// // std::cout<<"ISOLATE 3"<<std::endl; ////// thrust::host_vector<double> InternalAvgXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalAvgYHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalMaxXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalMaxYHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalMinXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalMinYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgX.begin() + allocPara_m.currentActiveCellCount, InternalAvgXHost.begin()); thrust::copy(cellInfoVecs.InternalAvgY.begin(), cellInfoVecs.InternalAvgY.begin() + allocPara_m.currentActiveCellCount, InternalAvgYHost.begin()); ///// // std::cout<<"ISOLATE 4"<<std::endl; thrust::host_vector<double> cellAreaHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> cellPerimHost( allocPara_m.currentActiveCellCount);//AAMIRI thrust::host_vector<double> cellPressureHost( allocPara_m.currentActiveCellCount);//Ali thrust::copy(cellInfoVecs.cellAreaVec.begin(), cellInfoVecs.cellAreaVec.begin() + allocPara_m.currentActiveCellCount, cellAreaHost.begin()); thrust::copy(cellInfoVecs.cellPerimVec.begin(), cellInfoVecs.cellPerimVec.begin() + allocPara_m.currentActiveCellCount, cellPerimHost.begin());//AAMIRI thrust::copy(cellInfoVecs.cellPressure.begin(), cellInfoVecs.cellPressure.begin() + allocPara_m.currentActiveCellCount, cellPressureHost.begin());//Ali // std::cout<<"ISOLATE 4"<<std::endl; //LOOKS LIKE AN ERROR POP UP IN THIS SECTION.............. 
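// --- added commentary (not original code) on the per-cell loop below ---
// For each active membrane node j of cell i, adhIndxHost[index] holds the
// global index of the node it adheres to (-1 if none); dividing by the
// per-cell node capacity recovers the neighboring cell rank:
//     adhCellRank = adhIndxHost[index] / allocPara_m.maxAllNodePerCell;
// Distinct ranks are collected in 'neighbors', and neighborStrength[] counts
// how many adhesion bonds connect to each of the first 10 neighbors. A run of
// more than bdryCriteria consecutive non-adhering membrane nodes (with
// wrap-around handled at the last node) marks the cell as a boundary cell.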
sumX=0 ; sumY=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { CellStats cellStatsData; cellStatsData.cellGrowthProgress = growthProVecHost[i]; cellStatsData.cellRank = i; bool isBdry = false; std::set<int> neighbors; std::vector<int> neighborsV; //Ali int neighborStrength[10]; //Ali int continousNoAdh = 0; map <int, int> cellAndNeighborRank ; //Ali // std::cout << "printing adhesion indicies "; //for (int ii=0 ; ii<neighborStrength.size() ; ii++){ for (int ii=0 ; ii< 10; ii++){ //Ali neighborStrength[ii]=0 ; } cellAndNeighborRank.clear(); //Ali // std::cout << "printing adhesion indicies 2"; for (uint j = 0; j < activeMembrNodeCountHost[i]; j++) { uint index = i * allocPara_m.maxAllNodePerCell + j; //std::cout << adhIndxHost[index] << ","; if (adhIndxHost[index] != -1) { uint adhCellRank = adhIndxHost[index] / allocPara_m.maxAllNodePerCell; //std::cout << adhCellRank << " "; neighbors.insert(adhCellRank); map <int, int>:: iterator iteratorMap=cellAndNeighborRank.find(adhCellRank); //Ali if (iteratorMap==cellAndNeighborRank.end()) { //Ali int NewneighborRank= neighbors.size()-1; //Ali cellAndNeighborRank[adhCellRank]=NewneighborRank; //Ali neighborStrength[NewneighborRank]=neighborStrength[NewneighborRank]+1 ; //Ali neighborsV.push_back(adhCellRank); //Ali } else { //Ali int oldNeighborRank=(*iteratorMap).second ; neighborStrength[oldNeighborRank]=neighborStrength[oldNeighborRank]+1 ; //Ali } continousNoAdh = 0; // std::cout << "printing adhesion indicies 3"; } else { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } // std::cout << "printing adhesion indicies 4"; } if (j == activeMembrNodeCountHost[i] - 1 && adhIndxHost[index] == -1) { int k = 0; uint indexNew; while (k < activeMembrNodeCountHost[i] - 1) { indexNew = i * allocPara_m.maxAllNodePerCell + k; if (adhIndxHost[indexNew] == -1) { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } k++; } else { break; } } // std::cout << "printing adhesion indicies 5"; } } // std::cout<<"ISOLATE 4.5"<<std::endl; cellStatsData.isBdryCell = isBdry; cellStatsData.numNeighbors = neighbors.size(); cellStatsData.currentActiveMembrNodes = activeMembrNodeCountHost[i]; cellStatsData.currentActiveIntnlNodes = activeIntnlNodeCountHost[i]; cellStatsData.neighborVec = neighbors; cellStatsData.neighborVecV = neighborsV; //Ali for (int iiii=0; iiii<10 ; iiii++){ cellStatsData.cellNeighborStrength[iiii] = neighborStrength[iiii]; } //Ali cellStatsData.membrGrowthProgress = growthProMembrVecHost[i]; cellStatsData.cellCenter = CVector(centerCoordXHost[i], centerCoordYHost[i], 0); cellStatsData.cellApicalLoc= CVector(apicalLocXHost[i], apicalLocYHost[i], 0); //Ali cellStatsData.cellBasalLoc= CVector(basalLocXHost[i], basalLocYHost[i], 0); //Ali cellStatsData.cellNucleusLoc = CVector(InternalAvgXHost[i], InternalAvgYHost[i], 0); // Ali cellStatsData.cellNucleusLocMax = CVector(InternalMaxXHost[i], InternalMaxYHost[i], 0); cellStatsData.cellNucleusLocMin = CVector(InternalMinXHost[i], InternalMinYHost[i], 0); cellStatsData.cellArea = cellAreaHost[i]; cellStatsData.cellPerim = cellPerimHost[i];//AAMIRI cellStatsData.cellPressure = cellPressureHost[i];//Ali result.cellsStats.push_back(cellStatsData); sumX=sumX+cellStatsData.cellCenter.x ; sumY=sumY+cellStatsData.cellCenter.y ; } // std::cout<<"ISOLATE 5"<<std::endl; //Ali if (BdryApproach==2) { cout << "sumX=" << sumX << endl ; cout << "sumY=" << sumY << endl ; cntr_X_Domain=sumX/result.cellsStats.size() ; 
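// (added note: cntr_Y_Domain is computed next; this BdryApproach==2 branch
//  re-classifies boundary cells geometrically: it takes the centroid of all
//  cell centers, finds the largest center-to-centroid distance R_Max, and
//  flags every cell farther than 0.9*R_Max from the centroid as a boundary
//  cell, overriding the adhesion-based flag set above)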
cntr_Y_Domain=sumY/result.cellsStats.size() ; cout << "cntr_X=" << cntr_X_Domain << endl ; cout << "cntr_Y=" << cntr_Y_Domain << endl ; double R_Max ; double Distance ; R_Max=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > R_Max) { R_Max=Distance ; } } cout << "R_Max=" << R_Max << endl ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > 0.9* R_Max) { result.cellsStats[i].isBdryCell = true; cout << "isBdryCell"<< i<< endl ; } else { result.cellsStats[i].isBdryCell = false; cout << "isNormalCell"<< i << endl ; } } } // std::cout<<"ISOLATE 6"<<std::endl; return result; } SingleCellData SceCells::OutputStressStrain() { SingleCellData result ; vector <double> nodeExtForceXHost; vector <double> nodeExtForceYHost; nodeExtForceXHost.resize(totalNodeCountForActiveCells); nodeExtForceYHost.resize(totalNodeCountForActiveCells); thrust::copy ( nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceX.begin()+ totalNodeCountForActiveCells, nodeExtForceXHost.begin()); thrust::copy ( nodes->getInfoVecs().nodeExtForceY.begin(), nodes->getInfoVecs().nodeExtForceY.begin()+ totalNodeCountForActiveCells, nodeExtForceYHost.begin()); // There is a compiling issue with using count_if on GPU. int numPositiveForces = count_if(nodeExtForceXHost.begin(),nodeExtForceXHost.end(),isGreaterZero() ) ; double totalExtPositiveForce =accumulate(nodeExtForceXHost.begin(),nodeExtForceXHost.end(),0.0, SumGreaterZero() ) ; cout << "number of positive external forces are=" <<numPositiveForces<<endl ; cout << "Total external forces are=" <<totalExtPositiveForce<<endl ; //thrust::device_vector<double>::iterator double MinX=*thrust::min_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; //thrust::device_vector<double>::iterator double MaxX=*thrust::max_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; result.Cells_Extrem_Loc[0]=MinX; result.Cells_Extrem_Loc[1]=MaxX; result.F_Ext_Out=totalExtPositiveForce ; return result ; } __device__ bool bigEnough(double& num) { if (num > minDivisor) { return true; } else { return false; } } __device__ double cross_Z(double vecA_X, double vecA_Y, double vecB_X, double vecB_Y) { return vecA_X * vecB_Y - vecA_Y * vecB_X; } /* __device__ double calBendMulti(double& angle, uint activeMembrCt) { double equAngle = PI - PI / activeMembrCt; return bendCoeff * (angle - equAngle); } */ //AAMIRI __device__ double calBendMulti_Mitotic(double& angle, uint activeMembrCt, double& progress, double mitoticCri) { //double equAngle = PI - PI / activeMembrCt; double equAngle = PI ; // - PI / activeMembrCt; if (progress <= mitoticCri){ return bendCoeff * (angle - equAngle);} else{ return (angle - equAngle)*(bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri)); } } __device__ double CalMembrBendSpringEnergy(double& angle, uint activeMembrCt, double& progress, double mitoticCri) { double equAngle = PI - PI / activeMembrCt; if (progress <= mitoticCri){ return ( 0.5*bendCoeff * (angle - equAngle)*(angle - equAngle) ); } else{ return ( 0.5*bendCoeff * (angle - 
equAngle)*(angle - equAngle)* (bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri)) ); } } void SceCells::applySceCellDisc_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); bool* isEnteringMitotic = thrust::raw_pointer_cast( &(cellInfoVecs.isEnteringMitotic[0])); //double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //Ali added for cell pressure calculation nodes->getInfoVecs().nodeF_MI_M_y.begin(),// ALi added for cell pressure calculation nodes->getInfoVecs().nodeIIEnergy.begin(), nodes->getInfoVecs().nodeIMEnergy.begin())), AddSceCellForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M, isEnteringMitotic)); /* for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { cout << "for cell rank "<<i<< " cell apical location is " << cellInfoVecs.apicalLocX[i] <<" , " <<cellInfoVecs.apicalLocY[i] << endl ; cout << "for cell rank "<<i<< " cell nucleus distance from apical is " << cellInfoVecs.nucDesireDistApical[i] << endl ; } */ } // void SceCells::applyMembContraction() { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; // uint maxMemNodePerCell = 
allocPara_m.maxMembrNodePerCell; // thrust::counting_iterator<uint> iBegin2(0); // double* nodeLocXAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocX[0])); // double* nodeLocYAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocY[0])); // MembraneType1* nodeTypeAddr=thrust::raw_pointer_cast( // &(nodes->getInfoVecs().memNodeType1[0])); // int* nodeMemMirrorIndexAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeMemMirrorIndex[0])); // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple( // thrust::make_permutation_iterator( // cellInfoVecs.nucDesireDistApical.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.activeMembrNodeCounts.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocX.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocY.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell)), // make_transform_iterator(iBegin2, // ModuloFunctor(maxAllNodePerCell)), // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().memNodeType1.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // thrust::make_permutation_iterator( // cellInfoVecs.nucDesireDistApical.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.activeMembrNodeCounts.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocX.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocY.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell)), // make_transform_iterator(iBegin2, // ModuloFunctor(maxAllNodePerCell)), // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().memNodeType1.begin())) // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().nodeF_MM_C_X.begin(), // nodes->getInfoVecs().nodeF_MM_C_Y.begin(), // nodes->getInfoVecs().nodeContractEnergyT.begin(), // nodes->getInfoVecs().basalContractPair.begin())), // AddMemContractForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, nodeTypeAddr,nodeMemMirrorIndexAddr)); // } // void SceCells::applyMembContraction(double timeRatio, double timeRatio_Crit_actomyo, double timeRatio_Crit_Division, double distFromNucleus_max, double distFromNucleus_min, double percentage_before_timeRatio_Crit_Division_scaling) { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; // uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; // thrust::counting_iterator<uint> iBegin2(0); // double* nodeLocXAddr = thrust::raw_pointer_cast( // 
&(nodes->getInfoVecs().nodeLocX[0])); // double* nodeLocYAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocY[0])); // // double* nodeLocZAddr = thrust::raw_pointer_cast( // // &(nodes->getInfoVecs().nodeLocZ[0])); // /*double* ActomyosinMultipBasal = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeActomyosinMultip_basal[0])); // double* ActomyosinMultipApical = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeActomyosinMultip_apical[0]));*/ // MembraneType1* nodeTypeAddr=thrust::raw_pointer_cast( // &(nodes->getInfoVecs().memNodeType1[0])); // int* nodeMemMirrorIndexAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeMemMirrorIndex[0])); // double* contractActomyo_multip = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().contractActomyo_multip[0])); // double* contractActomyo_multip_apical = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().contractActomyo_multip_apical[0])); // bool* isEnteringMitotic = thrust::raw_pointer_cast( // &(cellInfoVecs.isEnteringMitotic[0])); // double* contractileSpringGrowthProgress = thrust::raw_pointer_cast( // &(cellInfoVecs.contractileSpringGrowthProgress[0])); // double* distFromNucleus_normal = thrust::raw_pointer_cast( // &(cellInfoVecs.distFromNucleus_normal[0])); // double* distFromNucleus_normal_apical = thrust::raw_pointer_cast( // &(cellInfoVecs.distFromNucleus_normal_apical[0])); // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple( // thrust::make_permutation_iterator( // cellInfoVecs.nucDesireDistApical.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.activeMembrNodeCounts.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocX.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocY.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell)), // make_transform_iterator(iBegin2, // ModuloFunctor(maxAllNodePerCell)), // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().memNodeType1.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // thrust::make_permutation_iterator( // cellInfoVecs.nucDesireDistApical.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.activeMembrNodeCounts.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocX.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocY.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell)), // make_transform_iterator(iBegin2, // ModuloFunctor(maxAllNodePerCell)), // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().memNodeType1.begin())) // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeVelX.begin(), // 
nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().nodeF_MM_C_X.begin(), // nodes->getInfoVecs().nodeF_MM_C_Y.begin(), // nodes->getInfoVecs().nodeContractEnergyT.begin(), // nodes->getInfoVecs().basalContractPair.begin())), // // AddMemContractForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, ActomyosinMultipBasal,ActomyosinMultipApical, nodeTypeAddr,nodeMemMirrorIndexAddr, // // timeRatio, timeRatio_Crit_actomyo, timeRatio_Crit_Division)); // AddMemContractForce_tmp(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, nodeTypeAddr,nodeMemMirrorIndexAddr, // timeRatio, contractActomyo_multip, contractActomyo_multip_apical, distFromNucleus_max, distFromNucleus_min, distFromNucleus_normal, distFromNucleus_normal_apical, percentage_before_timeRatio_Crit_Division_scaling, isEnteringMitotic, contractileSpringGrowthProgress)); // } void SceCells::applyMembContraction2(double timeRatio, double timeRatio_Crit_actomyo, double timeRatio_Crit_Division, double distFromNucleus_max, double distFromNucleus_min, double mitoRndActomyoStrengthScaling) { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin2(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); // double* nodeLocZAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocZ[0])); /*double* ActomyosinMultipBasal = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeActomyosinMultip_basal[0])); double* ActomyosinMultipApical = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeActomyosinMultip_apical[0]));*/ MembraneType1* nodeTypeAddr=thrust::raw_pointer_cast( &(nodes->getInfoVecs().memNodeType1[0])); int* nodeMemMirrorIndexAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeMemMirrorIndex[0])); // double* contractActomyo_multip = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().contractActomyo_multip[0])); // double* contractActomyo_multip_apical = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().contractActomyo_multip_apical[0])); double* contractActomyo_multip = thrust::raw_pointer_cast( &(cellInfoVecs.contractActomyo_multip_perCell[0])); double* contractActomyo_multip_apical = thrust::raw_pointer_cast( &(cellInfoVecs.contractActomyo_multip_apical_perCell[0])); bool* isEnteringMitotic = thrust::raw_pointer_cast( &(cellInfoVecs.isEnteringMitotic[0])); double* contractileSpringGrowthProgress = thrust::raw_pointer_cast( &(cellInfoVecs.contractileSpringGrowthProgress[0])); double* distFromNucleus_normal = thrust::raw_pointer_cast( &(cellInfoVecs.distFromNucleus_normal[0])); double* distFromNucleus_normal_apical = thrust::raw_pointer_cast( &(cellInfoVecs.distFromNucleus_normal_apical[0])); double* individualCellHeight = thrust::raw_pointer_cast( &(cellInfoVecs.individualCellHeight[0])); double* distFromBasalLoc = thrust::raw_pointer_cast( &(cellInfoVecs.distFromBasalLoc[0])); double* distFromApicalLoc = thrust::raw_pointer_cast( &(cellInfoVecs.distFromApicalLoc[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.nucDesireDistApical.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), 
make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.apicalLocX.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.apicalLocY.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin2, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().memNodeType1.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.nucDesireDistApical.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.apicalLocX.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.apicalLocY.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin2, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().memNodeType1.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeF_MM_C_X.begin(), nodes->getInfoVecs().nodeF_MM_C_Y.begin(), nodes->getInfoVecs().nodeContractEnergyT.begin(), nodes->getInfoVecs().basalContractPair.begin())), // AddMemContractForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, ActomyosinMultipBasal,ActomyosinMultipApical, nodeTypeAddr,nodeMemMirrorIndexAddr, // timeRatio, timeRatio_Crit_actomyo, timeRatio_Crit_Division)); AddMemContractForce_tmp2(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, nodeTypeAddr,nodeMemMirrorIndexAddr, timeRatio, contractActomyo_multip, contractActomyo_multip_apical, distFromNucleus_max, distFromNucleus_min, distFromNucleus_normal, distFromNucleus_normal_apical, mitoRndActomyoStrengthScaling,//percentage_before_timeRatio_Crit_Division_scaling, isEnteringMitotic, contractileSpringGrowthProgress, individualCellHeight, distFromBasalLoc, distFromApicalLoc)); } // this function is not currently active. it was useful when nucleus is modeled as one point node and then force interaction between nucleus and other nodes was implemented in this function. The force interaction is one-way so only effect of nucleus on other nodes. 
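// --- added commentary (not original code) ---
// The node-nucleus interaction applied by this (currently inactive) routine,
// via the AddNucleusForce functor and calAndAddNucleusEffect below, is the
// usual two-exponential SCE force,
//     F(r) = -p0/p2 * exp(-r/p2) + p1/p3 * exp(-r/p3)   for r < p4,
// evaluated with the sceN_M parameter set, the sceNDiv_M set, or a linear
// blend of the two depending on growthProgress relative to grthPrgrCriVal_M
// and grthPrgrCriEnd_M; the resulting force on a node acts along the
// node-to-nucleus direction (one-way coupling, as noted above).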
void SceCells::applyForceInteractionNucleusAsPoint() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); //double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.nucleusLocX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.nucleusLocY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.nucleusLocX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.nucleusLocY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), AddNucleusForce(grthPrgrCriVal_M)); } void SceCells::PlotNucleus (int & lastPrintNucleus, int & outputFrameNucleus) { lastPrintNucleus=lastPrintNucleus+1 ; if (lastPrintNucleus>=10000) { outputFrameNucleus++ ; lastPrintNucleus=0 ; std::string vtkFileName = "Nucleus_" + patch::to_string(outputFrameNucleus-1) + ".vtk"; ofstream NucleusOut; NucleusOut.open(vtkFileName.c_str()); NucleusOut<< "# vtk DataFile Version 3.0" << endl; NucleusOut<< "Result for paraview 2d code" << endl; NucleusOut << "ASCII" << endl; NucleusOut << "DATASET UNSTRUCTURED_GRID" << std::endl; NucleusOut << "POINTS " << allocPara_m.currentActiveCellCount << " float" << std::endl; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { NucleusOut << cellInfoVecs.InternalAvgX[i] << " " << cellInfoVecs.InternalAvgY[i] << " " << 0.0 << std::endl; } NucleusOut<< std::endl; NucleusOut.close(); } } // __device__ // void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2, // double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { // double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); // double forceValue = 0; // // if (growPro > grthPrgrCriEnd_M) { // if (1 < 0){ // if (linkLength < sceIBDiv_M[4]) { // forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] // * exp(-linkLength / sceIBDiv_M[2]) // + sceIBDiv_M[1] / sceIBDiv_M[3] // * exp(-linkLength / sceIBDiv_M[3]); // } // } else if (2< 0){//(growPro > grthPrgrCriVal_M) { // double percent = (growPro - grthPrgrCriVal_M) // / (grthPrgrCriEnd_M - grthPrgrCriVal_M); // double lenLimit = percent * 
(sceIBDiv_M[4]) // + (1.0 - percent) * sceIB_M[4]; // if (linkLength < lenLimit) { // double intnlBPara0 = percent * (sceIBDiv_M[0]) // + (1.0 - percent) * sceIB_M[0]; // double intnlBPara1 = percent * (sceIBDiv_M[1]) // + (1.0 - percent) * sceIB_M[1]; // double intnlBPara2 = percent * (sceIBDiv_M[2]) // + (1.0 - percent) * sceIB_M[2]; // double intnlBPara3 = percent * (sceIBDiv_M[3]) // + (1.0 - percent) * sceIB_M[3]; // forceValue = -intnlBPara0 / intnlBPara2 // * exp(-linkLength / intnlBPara2) // + intnlBPara1 / intnlBPara3 // * exp(-linkLength / intnlBPara3); // } // } else { // if (linkLength < sceIB_M[4]) { // forceValue = -sceIB_M[0] / sceIB_M[2] // * exp(-linkLength / sceIB_M[2]) // + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); // } // } // xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; // yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; // } __device__ void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M, bool enteringMitotic) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; // if (growPro > grthPrgrCriEnd_M) { if (1 < 0){ if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (2< 0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (enteringMitotic == true){ if (linkLength < 1.25*sceIB_M[4]) { forceValue = (-sceIB_M[0] / sceIB_M[2] * exp(-linkLength / (1.25*sceIB_M[2])) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / (1.25*sceIB_M[3]))); } } else{ if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void CalAndAddIMEnergy(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& IMEnergyT, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double IMEnergy = 0; if (1<0){//(growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { IMEnergy = sceIBDiv_M[0] * exp(-linkLength / sceIBDiv_M[2]) -sceIBDiv_M[1] * exp(-linkLength / sceIBDiv_M[3]); } } else if (2<0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - 
percent) * sceIB_M[3]; IMEnergy = intnlBPara0 * exp(-linkLength / intnlBPara2) - intnlBPara1 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { IMEnergy = sceIB_M[0] * exp(-linkLength / sceIB_M[2]) - sceIB_M[1] * exp(-linkLength / sceIB_M[3]); } } IMEnergyT=IMEnergyT+IMEnergy ; } //Ali function added for eventually computing pressure for each cells // __device__ // void calAndAddIB_M2(double& xPos, double& yPos, double& xPos2, double& yPos2, // double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M) { // double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); // double forceValue = 0; // if (1<0){//if (growPro > grthPrgrCriEnd_M) { // if (linkLength < sceIBDiv_M[4]) { // forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] // * exp(-linkLength / sceIBDiv_M[2]) // + sceIBDiv_M[1] / sceIBDiv_M[3] // * exp(-linkLength / sceIBDiv_M[3]); // } // } else if (2<0){//(growPro > grthPrgrCriVal_M) { // double percent = (growPro - grthPrgrCriVal_M) // / (grthPrgrCriEnd_M - grthPrgrCriVal_M); // double lenLimit = percent * (sceIBDiv_M[4]) // + (1.0 - percent) * sceIB_M[4]; // if (linkLength < lenLimit) { // double intnlBPara0 = percent * (sceIBDiv_M[0]) // + (1.0 - percent) * sceIB_M[0]; // double intnlBPara1 = percent * (sceIBDiv_M[1]) // + (1.0 - percent) * sceIB_M[1]; // double intnlBPara2 = percent * (sceIBDiv_M[2]) // + (1.0 - percent) * sceIB_M[2]; // double intnlBPara3 = percent * (sceIBDiv_M[3]) // + (1.0 - percent) * sceIB_M[3]; // forceValue = -intnlBPara0 / intnlBPara2 // * exp(-linkLength / intnlBPara2) // + intnlBPara1 / intnlBPara3 // * exp(-linkLength / intnlBPara3); // } // } else { // if (linkLength < sceIB_M[4]) { // forceValue = -sceIB_M[0] / sceIB_M[2] // * exp(-linkLength / sceIB_M[2]) // + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); // } // } // F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength; // F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - yPos) / linkLength; // xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; // yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; // } __device__ void calAndAddIB_M2(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M, bool enteringMitotic) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (1<0){//if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (2<0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (enteringMitotic == true){ if (linkLength < 1.25*sceIB_M[4]) { forceValue = (-sceIB_M[0] / sceIB_M[2] * exp(-linkLength / (1.25*sceIB_M[2])) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / 
(1.25*sceIB_M[3]))); } } else{ if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } } F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength; F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void calAndAddMM_ContractRepl(double& xPos, double& yPos, double& xPos2, double& yPos2, double& xRes, double& yRes, double & F_MM_C_X, double & F_MM_C_Y) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; double sceMM_C[5] ; for (int i=0 ; i<5 ; i++) { sceMM_C[i]=sceIIDiv_M[i] ; } if (linkLength < sceMM_C[4]) { forceValue = -sceMM_C[0] / sceMM_C[2] * exp(-linkLength / sceMM_C[2]) + sceMM_C[1] / sceMM_C[3] * exp(-linkLength / sceMM_C[3]); } F_MM_C_X=F_MM_C_X+forceValue * (xPos2 - xPos) / linkLength; F_MM_C_Y=F_MM_C_Y+forceValue * (yPos2 - yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } // __device__ // void calAndAddMM_ContractAdh(double& xPos, double& yPos, double& xPos2, double& yPos2, // double& xRes, double& yRes, double & F_MM_C_X, double & F_MM_C_Y) { // double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); // double lZero=0.03125 ; // //double kCAdh=30 ; // double forceValue = 0; // if (linkLength > lZero) { // forceValue =kContractMemb*(linkLength-lZero) ; // } // F_MM_C_X=F_MM_C_X+forceValue * (xPos2 - xPos) / linkLength; // F_MM_C_Y=F_MM_C_Y+forceValue * (yPos2 - yPos) / linkLength; // xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; // yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; // } __device__ void calAndAddMM_ContractAdh(double& xPos, double& yPos, double& xPos2, double& yPos2, double& xRes, double& yRes, double & F_MM_C_X, double & F_MM_C_Y, double& kContrMemb_multip, double& kContrMemb_multip2) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double lZero=0.03125 ; //double kCAdh=30 ; double forceValue = 0; double scaling = (kContrMemb_multip + kContrMemb_multip2)/2.0; if (linkLength > lZero) { forceValue =scaling*kContractMemb*(linkLength-lZero) ; } F_MM_C_X=F_MM_C_X+forceValue * (xPos2 - xPos) / linkLength; F_MM_C_Y=F_MM_C_Y+forceValue * (yPos2 - yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void calAndAddII_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (1<0){//(growPro > grthPrgrCriEnd_M) { if (linkLength < sceIIDiv_M[4]) { forceValue = -sceIIDiv_M[0] / sceIIDiv_M[2] * exp(-linkLength / sceIIDiv_M[2]) + sceIIDiv_M[1] / sceIIDiv_M[3] * exp(-linkLength / sceIIDiv_M[3]); } } else if (2<0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIIDiv_M[4]) + (1.0 - percent) * sceII_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceIIDiv_M[0]) + (1.0 - percent) * sceII_M[0]; double intraPara1 = percent * (sceIIDiv_M[1]) + (1.0 - percent) * sceII_M[1]; double intraPara2 = percent * (sceIIDiv_M[2]) + (1.0 - percent) * sceII_M[2]; double intraPara3 = percent * (sceIIDiv_M[3]) + (1.0 - percent) * sceII_M[3]; forceValue = 
-intraPara0 / intraPara2 * exp(-linkLength / intraPara2) + intraPara1 / intraPara3 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceII_M[4]) { forceValue = -sceII_M[0] / sceII_M[2] * exp(-linkLength / sceII_M[2]) + sceII_M[1] / sceII_M[3] * exp(-linkLength / sceII_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void CalAndAddIIEnergy(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& IIEnergyT, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double IIEnergy = 0; if (1<0){//(growPro > grthPrgrCriEnd_M) { if (linkLength < sceIIDiv_M[4]) { IIEnergy= sceIIDiv_M[0]* exp(-linkLength / sceIIDiv_M[2]) - sceIIDiv_M[1]* exp(-linkLength / sceIIDiv_M[3]); } } else if (2<0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIIDiv_M[4]) + (1.0 - percent) * sceII_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceIIDiv_M[0]) + (1.0 - percent) * sceII_M[0]; double intraPara1 = percent * (sceIIDiv_M[1]) + (1.0 - percent) * sceII_M[1]; double intraPara2 = percent * (sceIIDiv_M[2]) + (1.0 - percent) * sceII_M[2]; double intraPara3 = percent * (sceIIDiv_M[3]) + (1.0 - percent) * sceII_M[3]; IIEnergy = intraPara0 * exp(-linkLength / intraPara2) -intraPara1 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceII_M[4]) { IIEnergy = sceII_M[0] * exp(-linkLength / sceII_M[2]) -sceII_M[1] * exp(-linkLength / sceII_M[3]); } } IIEnergyT=IIEnergyT+IIEnergy ; } __device__ void calAndAddNucleusEffect(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceNDiv_M[4]) { forceValue = -sceNDiv_M[0] / sceNDiv_M[2] * exp(-linkLength / sceNDiv_M[2]) + sceNDiv_M[1] / sceNDiv_M[3] * exp(-linkLength / sceNDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceNDiv_M[4]) + (1.0 - percent) * sceN_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceNDiv_M[0]) + (1.0 - percent) * sceN_M[0]; double intraPara1 = percent * (sceNDiv_M[1]) + (1.0 - percent) * sceN_M[1]; double intraPara2 = percent * (sceNDiv_M[2]) + (1.0 - percent) * sceN_M[2]; double intraPara3 = percent * (sceNDiv_M[3]) + (1.0 - percent) * sceN_M[3]; forceValue = -intraPara0 / intraPara2 * exp(-linkLength / intraPara2) + intraPara1 / intraPara3 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceN_M[4]) { forceValue = -sceN_M[0] / sceN_M[2] * exp(-linkLength / sceN_M[2]) + sceN_M[1] / sceN_M[3] * exp(-linkLength / sceN_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } void SceCells::writeNucleusIniLocPercent() { ofstream output ; thrust::host_vector <double> nucleusLocPercentHost ; string uniqueSymbolOutput = globalConfigVars.getConfigValue("UniqueSymbol").toString(); std::string resumeFileName = "./resources/DataFileInitLocNucleusPercent_" + uniqueSymbolOutput + "Resume.cfg"; output.open(resumeFileName.c_str() ); nucleusLocPercentHost=cellInfoVecs.nucleusLocPercent ; for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++){ output << i <<" 
"<<nucleusLocPercentHost[i] << endl ; } output.close() ; } // void SceCells::readNucleusIniLocPercent() { // ifstream input ; // vector <double> nucleusLocPercentHost ; // int dummy ; // double percent ; // string uniqueSymbol = globalConfigVars.getConfigValue("UniqueSymbol").toString(); // string resumeFileName = "./resources/DataFileInitLocNucleusPercent_" + uniqueSymbol + "Resume.cfg"; // input.open(resumeFileName.c_str() ); // if (input.is_open()) { // cout << " Suceessfully openend resume input file for initial locations of nucleus" << endl ; // } // else{ // throw std::invalid_argument ("Failed openening the resume input file for initial locations of nucleus") ; // } // for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++){ // input >> dummy >> percent ; // nucleusLocPercentHost.push_back(percent) ; // } // input.close() ; // cellInfoVecs.nucleusLocPercent= nucleusLocPercentHost ; // } // Original function dealing with nucleus position percentage if there is no division at all. void SceCells::readNucleusIniLocPercent() { ifstream input ; vector <double> nucleusLocPercentHost ; int dummy ; double percent ; string uniqueSymbol = globalConfigVars.getConfigValue("UniqueSymbol").toString(); string resumeFileName = "./resources/DataFileInitLocNucleusPercent_" + uniqueSymbol + "Resume.cfg"; input.open(resumeFileName.c_str() ); if (input.is_open()) { cout << " Suceessfully openend resume input file for initial locations of nucleus" << endl ; } else{ throw std::invalid_argument ("Failed openening the resume input file for initial locations of nucleus") ; } for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++){ input >> dummy >> percent ; nucleusLocPercentHost.push_back(percent) ; } if (allocPara_m.currentActiveCellCount < allocPara_m.maxCellCount){ std::cout<<"The number of currently active cell is less than the maximally allowed number, we will reserve space for additional cells."<<std::endl; std::cout<<"The nucleus percentage of inactive cells will be set to 0.0 initially, and must be updated when new cells are introduced."<<std::endl; int gap = allocPara_m.maxCellCount - allocPara_m.currentActiveCellCount; for (int gap_count = 0; gap_count < gap; gap_count++){ nucleusLocPercentHost.push_back(0.0); } } input.close() ; cellInfoVecs.nucleusLocPercent= nucleusLocPercentHost ; } void SceCells::allComponentsMoveImplicitPart() { vector <int> indexPrev, indexNext ; #ifdef debugModeECM hipEvent_t start1, start2, start3, stop; float elapsedTime1, elapsedTime2, elapsedTime3 ; hipEventCreate(&start1); hipEventCreate(&start2); hipEventCreate(&start3); hipEventCreate(&stop); hipEventRecord(start1, 0); #endif CalRHS(); #ifdef debugModeECM hipEventRecord(start2, 0); hipEventSynchronize(start2); hipEventElapsedTime(&elapsedTime1, start1, start2); #endif EquMotionCoef(indexPrev, indexNext); #ifdef debugModeECM hipEventRecord(start3, 0); hipEventSynchronize(start3); hipEventElapsedTime(&elapsedTime2, start2, start3); #endif UpdateLocations(indexPrev,indexNext); #ifdef debugModeECM hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime3, start3, stop); std::cout << "time 1 spent in cell-solver module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ; std::cout << "time 2 spent in cell-solver module for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ; std::cout << "time 3 spent in cell-solver module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ; #endif } void 
SceCells::StoreNodeOldPositions() { //nodes->getInfoVecs().locXOldHost.clear(); //nodes->getInfoVecs().locYOldHost.clear(); //nodes->getInfoVecs().locXOldHost.resize(totalNodeCountForActiveCells) ; //nodes->getInfoVecs().locYOldHost.resize(totalNodeCountForActiveCells) ; thrust::copy (nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocX.begin() + totalNodeCountForActiveCells, nodes->getInfoVecs().locXOldHost.begin()); thrust::copy (nodes->getInfoVecs().nodeLocY.begin() , nodes->getInfoVecs().nodeLocY.begin() + totalNodeCountForActiveCells, nodes->getInfoVecs().locYOldHost.begin()); } void SceCells::CalRHS () { // cout << "total node count for active cells in CalRHS function is="<<totalNodeCountForActiveCells << endl ; //nodes->getInfoVecs().rHSXHost.clear() ; //nodes->getInfoVecs().rHSYHost.clear() ; //nodes->getInfoVecs().rHSXHost.resize(totalNodeCountForActiveCells) ; //nodes->getInfoVecs().rHSYHost.resize(totalNodeCountForActiveCells) ; thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin()))+totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().rHSXHost.begin(), nodes->getInfoVecs().rHSYHost.begin()))); } void SceCells::EquMotionCoef(vector<int> & indexPrev, vector<int> & indexNext) { vector <uint> activeMemCount(allocPara_m.currentActiveCellCount) ; double distWithNext[totalNodeCountForActiveCells] ; double distWithPrev[totalNodeCountForActiveCells] ; int cellRank ; int nodeRank ; indexPrev.clear() ; indexNext.clear() ; //nodes->getInfoVecs().hCoefD.clear() ; //nodes->getInfoVecs().hCoefLd.clear() ; //nodes->getInfoVecs().hCoefUd.clear() ; //nodes->getInfoVecs().nodeIsActiveH.clear(); indexPrev.resize(totalNodeCountForActiveCells) ; indexNext.resize(totalNodeCountForActiveCells) ; //nodes->getInfoVecs().hCoefD.resize(totalNodeCountForActiveCells,0.0) ; //nodes->getInfoVecs().hCoefLd.resize(totalNodeCountForActiveCells,0.0) ; //nodes->getInfoVecs().hCoefUd.resize(totalNodeCountForActiveCells,0.0) ; //nodes->getInfoVecs().nodeIsActiveH.resize(totalNodeCountForActiveCells) ; thrust::copy (nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeIsActive.begin()+ totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsActiveH.begin()); thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin() , cellInfoVecs.activeMembrNodeCounts.begin()+ allocPara_m.currentActiveCellCount, activeMemCount.begin()); //cout << "Maximum all node per cells is " << allocPara_m.maxAllNodePerCell << endl ; for ( int i=0 ; i< totalNodeCountForActiveCells ; i++) { cellRank=i/allocPara_m.maxAllNodePerCell ; nodeRank=i%allocPara_m.maxAllNodePerCell ; if ( nodeRank<activeMemCount [cellRank]) { indexNext.at(i)=i+1 ; indexPrev.at(i)=i-1 ; if ( nodeRank==activeMemCount [cellRank]-1){ indexNext.at(i)=cellRank*allocPara_m.maxAllNodePerCell ; // cout << "index next for cell rank " << cellRank << " is " << indexNext.at(i) << endl ; } if (nodeRank==0){ indexPrev.at(i)=cellRank*allocPara_m.maxAllNodePerCell +activeMemCount [cellRank]-1 ; // cout << "Active membrane nodes for cell rank " << cellRank << " is " <<activeMemCount [cellRank]<<endl ; // cout << "index previous for cell rank " << cellRank << " is " << indexPrev.at(i) << endl ; } distWithNext[i]=sqrt( pow(nodes->getInfoVecs().locXOldHost[indexNext.at(i)] - nodes->getInfoVecs().locXOldHost[i],2) + 
pow(nodes->getInfoVecs().locYOldHost[indexNext.at(i)] - nodes->getInfoVecs().locYOldHost[i],2)) ; distWithPrev[i]=sqrt( pow(nodes->getInfoVecs().locXOldHost[indexPrev.at(i)] - nodes->getInfoVecs().locXOldHost[i],2) + pow(nodes->getInfoVecs().locYOldHost[indexPrev.at(i)] - nodes->getInfoVecs().locYOldHost[i],2)); } } double sponLen= globalConfigVars.getConfigValue("MembrEquLen").toDouble(); double k= globalConfigVars.getConfigValue("MembrStiff").toDouble(); for ( int i=0 ; i< totalNodeCountForActiveCells ; i++) { if (nodes->getInfoVecs().nodeIsActiveH.at(i)==false) { continue ; } cellRank=i / allocPara_m.maxAllNodePerCell; nodeRank=i % allocPara_m.maxAllNodePerCell; if (nodeRank<activeMemCount [cellRank]) { nodes->getInfoVecs().hCoefD[i]= 1 + k*dt/Damp_Coef*( 2 - sponLen/(distWithPrev[i]+0.2*sponLen) - sponLen/(distWithNext[i]+0.2*sponLen)) ; nodes->getInfoVecs().hCoefLd[i]= k*dt/Damp_Coef*(-1 + sponLen/(distWithPrev[i]+0.2*sponLen)) ; nodes->getInfoVecs().hCoefUd[i]= k*dt/Damp_Coef*(-1 + sponLen/(distWithNext[i]+0.2*sponLen)) ; } else { // no spring between neighboring points exist nodes->getInfoVecs().hCoefD[i]=1.0 ; nodes->getInfoVecs().hCoefLd[i]=0.0 ; nodes->getInfoVecs().hCoefUd[i]=0.0 ; } } } void SceCells::UpdateLocations(const vector <int> & indexPrev,const vector <int> & indexNext ) { vector <double> locXTmpHost=solverPointer->SOR3DiagPeriodic(nodes->getInfoVecs().nodeIsActiveH, nodes->getInfoVecs().hCoefLd, nodes->getInfoVecs().hCoefD, nodes->getInfoVecs().hCoefUd, nodes->getInfoVecs().rHSXHost, indexPrev,indexNext, nodes->getInfoVecs().locXOldHost); vector <double> locYTmpHost=solverPointer->SOR3DiagPeriodic(nodes->getInfoVecs().nodeIsActiveH, nodes->getInfoVecs().hCoefLd, nodes->getInfoVecs().hCoefD, nodes->getInfoVecs().hCoefUd, nodes->getInfoVecs().rHSYHost, indexPrev,indexNext, nodes->getInfoVecs().locYOldHost); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple (locXTmpHost.begin(), locYTmpHost.begin())), thrust::make_zip_iterator( thrust::make_tuple (locXTmpHost.begin(), locYTmpHost.begin()))+totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple (nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin()))); }
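// ---------------------------------------------------------------------------
// Illustration only (not part of the original Solver class): a minimal
// host-side sketch of what a successive-over-relaxation (SOR) sweep for the
// periodic three-diagonal system assembled by EquMotionCoef() and consumed by
// UpdateLocations() could look like. The interface (active flags, lower/main/
// upper diagonal coefficients, right-hand side, periodic prev/next index maps,
// old positions as the initial guess) mirrors the call to
// solverPointer->SOR3DiagPeriodic(); the relaxation factor, tolerance and
// iteration cap below are assumptions, and the real implementation may differ.
// Only <vector> is required, which this file already uses throughout.
static std::vector<double> SOR3DiagPeriodicSketch(
		const std::vector<bool> & isActive,
		const std::vector<double> & ld,   // multiplies x[indexPrev[i]]
		const std::vector<double> & d,    // multiplies x[i]
		const std::vector<double> & ud,   // multiplies x[indexNext[i]]
		const std::vector<double> & rhs,
		const std::vector<int> & indexPrev,
		const std::vector<int> & indexNext,
		const std::vector<double> & oldPos) {
	const double omega = 1.2;        // assumed over-relaxation factor
	const double tol = 1.0e-10;      // assumed convergence tolerance
	const int maxIter = 1000;        // assumed iteration cap
	std::vector<double> x = oldPos;  // start from the previous positions
	for (int iter = 0; iter < maxIter; iter++) {
		double maxChange = 0.0;
		for (size_t i = 0; i < x.size(); i++) {
			if (!isActive[i]) {
				continue;            // inactive node slots keep their value
			}
			// Gauss-Seidel value for row i of ld*x[prev] + d*x[i] + ud*x[next] = rhs
			double gs = (rhs[i] - ld[i] * x[indexPrev[i]]
					- ud[i] * x[indexNext[i]]) / d[i];
			double xNew = (1.0 - omega) * x[i] + omega * gs;
			double change = (xNew > x[i]) ? (xNew - x[i]) : (x[i] - xNew);
			if (change > maxChange) {
				maxChange = change;
			}
			x[i] = xNew;
		}
		if (maxChange < tol) {
			break;                   // converged
		}
	}
	return x;
}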
522214581e96d983ee77235a63e0e631df281f75.cu
// to do list: //1-center of the tissue is manually intorduced in the obtainTwoNewCenter function to recognize if the mothe cell is in front or behind //2- If two division occur at the exact same time, there is a chance of error in detecting mother and daughter cells //3- In function processMemVec the lateralBefore and LateralAfter are not carefully assigned. If the code wanted to be used again for the cases where division is happening, this should be revisited. //4- If the code wanted to be used again for the case where node deletion is active then the function for calculating cell pressure (void SceCells::calCellPressure()) need to be revisited. //5- two bool variables subcellularPolar and cellularPolar are given values inside the code. Although for now it is always true, it is better to be input parameters. //6-the value of L0 in the function calAndAddMM_ContractAdh is directly inside the function. It should be an input of the code //7- In the function calAndAddMM_ContractRepl, the values of Morse potential are equal to the values of sceIIDiv_M[i] in the input file. it should be an input of the code. //8- In the function calBendMulti_Mitotic the equlibrium angle for bending stifness is pi it should be an input for the code //Notes: // 1- Currently the nucleus position is desired location not an enforced position. So, all the functions which used "nucleusLocX" & "nucleusLocY" are not active. Instead two variables "nucleusDesireLocX" & "nucleusDesireLocY" are active and internal avg position represent where the nuclei are located. // 2- NucleusLocPercent info is currently updated in "copyFirstCellArr_M" using the mother cellrank and daughter cellrank. For now, the daughter cell // will inherit the same percentage as the mother cell. - Kevin // 3- We currently force all cell area to be positive after its execution. However, this is only an adhoc method since modifying the CalTriArea anyway will lead // to seg fault. Will need to figure out why eventually. - Kevin // 4- To accommodate non-convex cell shape during division, the "isMotherCellBehind" is set to be true all the time. Since we are not tracking lineage in anyway at the moment // , it is acceptable. - Kevin // 5- "calApicalBasalRingIds" function is not updated for the newest division algorithm (for non-convex cell) using basal and apical loc of each cell. Since we are not computing contractile ring forces, // , it is unnecessary. However, if we do indeed need to compute the force, it has to be updated accordingly. - Kevin // 6- The 'contractileSpringGrowthProgressSpeed' and similar constants must be recalculated if the min and max distance from the cell center is adjusted for mitotic rounding. 
- Kevin #include "SceCells.h" #include <cmath> #include <numeric> //# define debugModeECM double epsilon = 1.0e-12; __constant__ double membrEquLen; __constant__ double membrStiff; __constant__ double membrStiff_Mitotic; //Ali June 30 __constant__ double kContractMemb ; __constant__ double pI; __constant__ double minLength; __constant__ double minDivisor; __constant__ uint maxAllNodePerCell; __constant__ uint maxMembrPerCell; __constant__ uint maxIntnlPerCell; __constant__ double bendCoeff; __constant__ double bendCoeff_Mitotic;//AAMIRI __constant__ double sceIB_M[5]; __constant__ double sceIBDiv_M[5]; __constant__ double sceII_M[5]; __constant__ double sceN_M[5]; //Ali __constant__ double sceIIDiv_M[5]; __constant__ double sceNDiv_M[5]; //Ali __constant__ double grthPrgrCriEnd_M; __constant__ double F_Ext_Incline_M2 ; //Ali namespace patch{ template <typename T> std::string to_string (const T& n) { std:: ostringstream stm ; stm << n ; return stm.str() ; } } //Ali & Abu June 30th __device__ double calMembrForce_Mitotic(double& length, double& progress, double mitoticCri, double adhereIndex) { /* if (adhereIndex==-1) { if (progress <= mitoticCri) { return (length - membrEquLen) * membrStiff; } else { return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri)); } } */ // else { if (progress <= mitoticCri) { return (length - membrEquLen) * membrStiff; } else { return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri)); } // } } // //Ali __device__ double calMembrForce_Actin(double& length, double kAvg) { return ((length - membrEquLen) * kAvg); } __device__ double calMembrForce_ActinMitotic(double& length, double kAvg) { return ((length - 0.5*membrEquLen) * kAvg); } __device__ double CalMembrLinSpringEnergy(double& length, double kAvg) { return (0.5*kAvg *(length - membrEquLen)*(length - membrEquLen)) ; } __device__ double DefaultMembraneStiff() { int kStiff=membrStiff ; return kStiff; } __device__ double CalExtForce(double curTime) { return min(curTime * F_Ext_Incline_M2,10.0); } //Ali __device__ double obtainRandAngle(uint& cellRank, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(cellRank); thrust::uniform_real_distribution<double> u0Pi(0, 2.0 * pI); double randomAngle = u0Pi(rng); return randomAngle; } __device__ uint obtainNewIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount); } //AAMIRI __device__ uint obtainLastIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount ); } //AAMIRI __device__ uint obtainMembEndNode(uint& cellRank, uint& activeMembrNodeThis) { return (cellRank * maxAllNodePerCell + activeMembrNodeThis - 1 ); } __device__ bool isAllIntnlFilled(uint& currentIntnlCount) { if (currentIntnlCount < maxIntnlPerCell) { return false; } else { return true; } } //AAMIRI __device__ int obtainRemovingMembrNodeID(uint &cellRank, uint& activeMembrNodes, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(activeMembrNodes); thrust::uniform_int_distribution<double> dist(0, activeMembrNodes-1); int randomNode = dist(rng); return (cellRank * maxAllNodePerCell + randomNode); } //AAMIRI __device__ bool isAllIntnlEmptied(uint& currentIntnlCount) { if (currentIntnlCount > 0) { return false; } else { return true; } } 
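// Illustration only (hypothetical helper, not called anywhere in this file):
// the stiffness ramp used inside calMembrForce_Mitotic() above, factored out
// for clarity. Once growth progress exceeds `mitoticCri`, the membrane spring
// constant is interpolated linearly from membrStiff up to membrStiff_Mitotic,
// reaching the fully mitotic value at progress == 1.0.
// Example: with mitoticCri = 0.9 and progress = 0.95 the returned stiffness
// is halfway between membrStiff and membrStiff_Mitotic.
__device__ double interpMembrStiffSketch(double progress, double mitoticCri) {
	if (progress <= mitoticCri) {
		return membrStiff;
	}
	// fraction of the mitotic window already traversed, in [0, 1]
	double frac = (progress - mitoticCri) / (1.0 - mitoticCri);
	return membrStiff + (membrStiff_Mitotic - membrStiff) * frac;
}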
//AAMIRI __device__ bool isAllMembrEmptied(uint& currentMembrCount) { if (currentMembrCount > 0) { return false; } else { return true; } } __device__ bool longEnough(double& length) { if (length > minLength) { return true; } else { return false; } } __device__ double compDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) { return sqrt( (xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)); } void SceCells::distributeBdryIsActiveInfo() { thrust::fill(nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, true); } //void SceCells::UpdateTimeStepByAdaptiveMethod( double adaptiveLevelCoef,double minDt,double maxDt, double & dt) { //double energyPrime=( energyCell.totalNodeEnergyCell +eCM.energyECM.totalEnergyECM - energyCell.totalNodeEnergyCellOld - eCM.energyECM.totalEnergyECMOld)/dt ; //eCM.energyECM.totalEnergyPrimeECM=( eCM.energyECM.totalEnergyECM - eCM.energyECM.totalEnergyECMOld)/dt ; //dt=dt ; // max (minDt, maxDt/sqrt( 1 +adaptiveLevelCoef*pow(eCM.energyECM.totalEnergyPrimeECM,2))) ; //dt=max (minDt, maxDt/sqrt( 1 +pow(adaptiveLevelCoef*eCM.energyECM.totalEnergyPrimeECM,2))) ; //} void SceCells::distributeProfileIsActiveInfo() { thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile + nodes->getAllocPara().currentActiveProfileNodeCount, true); } void SceCells::distributeECMIsActiveInfo() { uint totalNodeCountForActiveECM = allocPara.currentActiveECM * allocPara.maxNodePerECM; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveECM); thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosECM, nodes->getInfoVecs().nodeIsActive.begin() + totalNodeCountForActiveECM + allocPara.startPosECM, true); } void SceCells::distributeCellIsActiveInfo() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::transform( thrust::make_transform_iterator(countingBegin, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_transform_iterator(countingEnd, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_permutation_iterator( cellInfoVecs.activeNodeCountOfThisCell.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::less<uint>()); } void SceCells::distributeCellGrowthProgress() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara.startPosCells); } void MembrPara::initFromConfig() { membrEquLenCPU = globalConfigVars.getConfigValue("MembrEquLen").toDouble(); membrStiffCPU = globalConfigVars.getConfigValue("MembrStiff").toDouble(); membrStiff_Mitotic = 
globalConfigVars.getConfigValue("MembrStiff_Mitotic").toDouble(); //Ali June30 kContractMemb = globalConfigVars.getConfigValue("KContractMemb").toDouble(); //Ali membrGrowCoeff_Ori = globalConfigVars.getConfigValue("MembrGrowCoeff").toDouble(); membrGrowLimit_Ori = globalConfigVars.getConfigValue("MembrGrowLimit").toDouble(); membrGrowCoeff = membrGrowCoeff_Ori; membrGrowLimit = membrGrowLimit_Ori; //Ali F_Ext_Incline = globalConfigVars.getConfigValue("FExtIncline").toDouble(); //Ali membrBendCoeff = globalConfigVars.getConfigValue("MembrBenCoeff").toDouble(); //AAMIRI membrBendCoeff_Mitotic = globalConfigVars.getConfigValue("MembrBenCoeff_Mitotic").toDouble(); adjustLimit = globalConfigVars.getConfigValue("MembrAdjustLimit").toDouble(); adjustCoeff = globalConfigVars.getConfigValue("MembrAdjustCoeff").toDouble(); growthConst_N = globalConfigVars.getConfigValue("MembrGrowthConst").toDouble(); initMembrCt_N = globalConfigVars.getConfigValue("InitMembrNodeCount").toInt(); initIntnlCt_N = globalConfigVars.getConfigValue("InitCellNodeCount").toInt(); } SceCells::SceCells() { //curTime = 0 + 55800.0;//AAMIRI // Ali I comment that out safely on 04/04/2017 } void SceCells::growAtRandom(double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; // randomly select growth direction and speed. randomizeGrowth(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); //std::cout << "after apply stretch force" << std::endl; cellChemotaxis(); //std::cout << "after apply cell chemotaxis" << std::endl; addPointIfScheduledToGrow(); //std::cout << "after adding node" << std::endl; } /** * Use the growth magnitude and dt to update growthProgress. */ void SceCells::updateGrowthProgress() { thrust::transform(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); } /** * Decide if the cells are going to add a node or not. * Use lastCheckPoint and growthProgress to decide whether add point or not */ void SceCells::decideIsScheduleToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } /** * Calculate target length of cell given the cell growth progress. * length is along the growth direction. */ void SceCells::computeCellTargetLength() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } /** * Compute distance of each node to its corresponding cell center. 
* The distantce could be either positive or negative, depending on the pre-defined * growth direction. */ void SceCells::computeDistToCellCenter() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } /** * For nodes of each cell, find the maximum and minimum distance to the center. * We will then calculate the current length of a cell along its growth direction * using max and min distance to the center. */ void SceCells::findMinAndMaxDistToCenter() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } /** * Compute the difference for cells between their expected length and current length. 
*/ void SceCells::computeLenDiffExpCur() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } /** * Use the difference that just computed and growthXDir&growthYDir * to apply stretching force (velocity) on nodes of all cells */ void SceCells::stretchCellGivenLenDiff() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyStretchForce(bioPara.elongationCoefficient)); } /** * This is just an attempt. Cells move according to chemicals. 
*/ void SceCells::cellChemotaxis() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyChemoVel(bioPara.chemoCoefficient)); } /** * Adjust the velocities of nodes. * For example, velocity of boundary nodes must be zero. */ void SceCells::adjustNodeVel() { thrust::counting_iterator<uint> countingIterBegin(0); thrust::counting_iterator<uint> countingIterEnd( totalNodeCountForActiveCells + allocPara.startPosCells); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), VelocityModifier(allocPara.startPosProfile, allocPara.currentActiveProfileNodeCount)); } /** * Move nodes according to the velocity we just adjusted. */ void SceCells::moveNodes() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2(dt)); } /** * Add a point to a cell if it is scheduled to grow. 
* This step does not guarantee success ; If adding new point failed, it will not change * isScheduleToGrow and activeNodeCount; */ void SceCells::addPointIfScheduledToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.lastCheckPoint.begin())), AddPtOp(allocPara.maxNodeOfOneCell, miscPara.addNodeDistance, miscPara.minDistanceToOtherNode, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, time(NULL), miscPara.growThreshold)); } //Ali commented this constructor in 04/04/2017 // this constructor is not active SceCells::SceCells(SceNodes* nodesInput, std::vector<uint>& numOfInitActiveNodesOfCells, std::vector<SceNodeType>& cellTypes) : countingBegin(0), initIntnlNodeCount( nodesInput->getAllocPara().maxNodeOfOneCell / 2), initGrowthProgress( 0.0) { curTime = 0.0 + 55800.0;//AAMIRI std ::cout << "I am in SceCells constructor with polymorphism shape "<<InitTimeStage<<std::endl ; initialize(nodesInput); copyInitActiveNodeCount(numOfInitActiveNodesOfCells); thrust::device_vector<SceNodeType> cellTypesToPass = cellTypes; setCellTypes(cellTypesToPass); distributeIsActiveInfo(); } SceCells::SceCells(SceNodes* nodesInput,SceECM* eCMInput, Solver * solver, std::vector<uint>& initActiveMembrNodeCounts, std::vector<uint>& initActiveIntnlNodeCounts, std::vector<double> &initGrowProgVec, std::vector<ECellType> &eCellTypeV1, double InitTimeStage) { curTime=InitTimeStage ; std ::cout << "I am in SceCells constructor with number of inputs "<<InitTimeStage<<std::endl ; tmpDebug = false; aniDebug = false; membrPara.initFromConfig(); shrinkRatio = globalConfigVars.getConfigValue("ShrinkRatio").toDouble(); centerShiftRatio = globalConfigVars.getConfigValue("CenterShiftRatio").toDouble(); double simulationTotalTime = globalConfigVars.getConfigValue("SimulationTotalTime").toDouble(); double simulationTimeStep = globalConfigVars.getConfigValue("SimulationTimeStep").toDouble(); int TotalNumOfOutputFrames = globalConfigVars.getConfigValue("TotalNumOfOutputFrames").toInt(); std ::cout << "I am in SceCells constructor with zero element "<<InitTimeStage<<std::endl ; isInitNucPercentCalculated=false ; isBasalActinPresent=true ; isCellGrowSet=false ; cout <<" Basal actinomyosin is active on pouch cells" << endl ; addNode=true ; cout << " addNode boolean is initialized " <<addNode <<endl ; relaxCount=0 ; freqPlotData=int ( (simulationTotalTime-InitTimeStage)/(simulationTimeStep*TotalNumOfOutputFrames) ) ; memNewSpacing = globalConfigVars.getConfigValue("MembrLenDiv").toDouble(); cout << "relax count is initialized as" << relaxCount << endl ; initialize_M(nodesInput, eCMInput, solver); copyToGPUConstMem(); copyInitActiveNodeCount_M(initActiveMembrNodeCounts, initActiveIntnlNodeCounts, initGrowProgVec, eCellTypeV1); } void SceCells::initCellInfoVecs() { cellInfoVecs.growthProgress.resize(allocPara.maxCellCount, 0.0); 
cellInfoVecs.expectedLength.resize(allocPara.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara.maxCellCount); cellInfoVecs.activeNodeCountOfThisCell.resize(allocPara.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara.maxCellCount); cellInfoVecs.cellTypes.resize(allocPara.maxCellCount, MX); cellInfoVecs.isScheduledToGrow.resize(allocPara.maxCellCount, false); cellInfoVecs.centerCoordX.resize(allocPara.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara.maxCellCount); cellInfoVecs.centerCoordZ.resize(allocPara.maxCellCount); cellInfoVecs.cellRanksTmpStorage.resize(allocPara.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara.maxCellCount, false); } void SceCells::initCellInfoVecs_M() { cellInfoVecs.daughterCellProduced.resize(allocPara_m.maxCellCount, 0); cellInfoVecs.distFromNucleus_normal.resize(allocPara_m.maxCellCount,0.0); cellInfoVecs.distFromNucleus_normal_apical.resize(allocPara_m.maxCellCount,0.0); cellInfoVecs.individualCellHeight.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.individualCellHeight_springHeight.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.distFromBasalLoc.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.distFromApicalLoc.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.contractActomyo_multip_perCell.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.contractActomyo_multip_apical_perCell.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.numApicalVec.resize(allocPara_m.maxCellCount, 0); cellInfoVecs.numBasalVec.resize(allocPara_m.maxCellCount, 0); cellInfoVecs.cellRankVec.resize(allocPara_m.maxCellCount, 0); //std::cout << "max cell count = " << allocPara_m.maxCellCount << std::endl; cellInfoVecs.Cell_Damp.resize(allocPara_m.maxCellCount, 36.0); //Ali cellInfoVecs.growthProgress.resize(allocPara_m.maxCellCount, 0.0); //A&A cellInfoVecs.contractileSpringGrowthProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaGrowthProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaGrowthProgressNonMitotic.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.growthProgressOld.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.Cell_Time.resize(allocPara_m.maxCellCount, 0.0); //Ali cellInfoVecs.expectedLength.resize(allocPara_m.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.activeMembrNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.activeIntnlNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara_m.maxCellCount); cellInfoVecs.isEnteringMitotic.resize(allocPara_m.maxCellCount, false); //A&A //cellInfoVecs.isRemoving.resize(allocPara.maxCellCount);//AAMIRI cellInfoVecs.isScheduledToGrow.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isScheduledToShrink.resize(allocPara_m.maxCellCount, false);//AAMIRI cellInfoVecs.isCellActive.resize(allocPara_m.maxCellCount, false);//AAMIRI 
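	// Note: the per-cell vectors in this initializer are sized to
	// allocPara_m.maxCellCount (not the currently active cell count),
	// presumably so that slots for daughter cells produced by later divisions
	// already exist and activating a new cell does not require resizing
	// device storage.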
cellInfoVecs.centerCoordX.resize(allocPara_m.maxCellCount); cellInfoVecs.InternalAvgX.resize(allocPara_m.maxCellCount); //cellInfoVecs.InternalAvgIniX.resize(allocPara_m.maxCellCount); cellInfoVecs.tmpShiftVecX.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara_m.maxCellCount); cellInfoVecs.InternalAvgY.resize(allocPara_m.maxCellCount); //cellInfoVecs.InternalAvgIniY.resize(allocPara_m.maxCellCount); cellInfoVecs.tmpShiftVecY.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordZ.resize(allocPara_m.maxCellCount); cellInfoVecs.apicalLocX.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.apicalLocY.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.basalLocX.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.basalLocY.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.eCMNeighborId.resize(allocPara_m.maxCellCount,-1); //Ali cellInfoVecs.nucleusLocX.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.nucleusDesireLocX.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.nucleusDesireLocY.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.nucDesireDistApical.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.cellCenterDesireDistApical.resize(allocPara_m.maxCellCount); //Kevin cellInfoVecs.cellCenterPerturbedLocLocX.resize(allocPara_m.maxCellCount); //Kevin cellInfoVecs.cellCenterPerturbedLocLocY.resize(allocPara_m.maxCellCount); //Kevin cellInfoVecs.nucleusLocY.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.nucleusLocPercent.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.apicalNodeCount.resize(allocPara_m.maxCellCount,0); //Ali cellInfoVecs.basalNodeCount.resize(allocPara_m.maxCellCount,0); //Ali cellInfoVecs.ringApicalId.resize(allocPara_m.maxCellCount,-1); //Ali cellInfoVecs.ringBasalId.resize(allocPara_m.maxCellCount,-1); //Ali cellInfoVecs.sumLagrangeFPerCellX.resize(allocPara_m.maxCellCount,0.0); //Ali cellInfoVecs.sumLagrangeFPerCellY.resize(allocPara_m.maxCellCount,0.0); //Ali cellInfoVecs.HertwigXdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.HertwigYdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.cellRanksTmpStorage.resize(allocPara_m.maxCellCount); cellInfoVecs.cellRanksTmpStorage1.resize(allocPara_m.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara_m.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara_m.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isMembrAddingNode.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isMembrRemovingNode.resize(allocPara_m.maxCellCount, false); // Ali cellInfoVecs.maxTenIndxVec.resize(allocPara_m.maxCellCount); cellInfoVecs.minTenIndxVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenRiVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxDistToRiVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenIndxTypeVec.resize(allocPara_m.maxCellCount); cellInfoVecs.minDistToRiVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenRiMidXVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxTenRiMidYVec.resize(allocPara_m.maxCellCount); cellInfoVecs.aveTension.resize(allocPara_m.maxCellCount); cellInfoVecs.membrGrowProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.membrGrowSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaVec.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellPerimVec.resize(allocPara_m.maxCellCount, 0.0);//AAMIRI cellInfoVecs.cellPressure.resize(allocPara_m.maxCellCount, 
0.0);//Ali cellInfoVecs.sumF_MI_M_N.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.sumLagrangeFN.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.eCellTypeV2.resize(allocPara_m.maxCellCount, notActive);//Ali //cellInfoVecs.eCellTypeV2Host.resize(allocPara_m.maxCellCount, notActive);//Ali cellInfoVecs.cellRoot.resize(allocPara_m.maxCellCount, -1);//Ali thrust:: sequence (cellInfoVecs.cellRoot.begin(),cellInfoVecs.cellRoot.begin()+allocPara_m.currentActiveCellCount) ; //Ali std::cout << "initial number of active cells is " <<allocPara_m.currentActiveCellCount <<std::endl; std::cout <<"last cell rank used in the cell root is " <<cellInfoVecs.cellRoot[allocPara_m.currentActiveCellCount-1] << endl ; } void SceCells::initCellNodeInfoVecs() { cellNodeInfoVecs.cellRanks.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeZPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara.maxTotalCellNodeCount); } void SceCells::initCellNodeInfoVecs_M() { std::cout << "max total node count = " << allocPara_m.maxTotalNodeCount << std::endl; cellNodeInfoVecs.cellRanks.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeZPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeLocXApical.resize(allocPara_m.maxTotalNodeCount); //Ali cellNodeInfoVecs.activeLocYApical.resize(allocPara_m.maxTotalNodeCount); //Ali cellNodeInfoVecs.activeLocXBasal.resize(allocPara_m.maxTotalNodeCount); //Ali cellNodeInfoVecs.activeLocYBasal.resize(allocPara_m.maxTotalNodeCount); //Ali } void SceCells::initGrowthAuxData() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara.startPosCells])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara.startPosCells])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara.startPosCells])); growthAuxData.randomGrowthSpeedMin = globalConfigVars.getConfigValue( "RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randGenAuxPara = globalConfigVars.getConfigValue( "RandomGenerationAuxPara").toDouble(); if (controlPara.simuType == SingleCellTest) { growthAuxData.fixedGrowthSpeed = globalConfigVars.getConfigValue( "FixedGrowthSpeed").toDouble(); } } void SceCells::initGrowthAuxData_M() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara_m.bdryNodeCount])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara_m.bdryNodeCount])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara_m.bdryNodeCount])); growthAuxData.adhIndxAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[allocPara_m.bdryNodeCount])); growthAuxData.memNodeType1Address = thrust::raw_pointer_cast( &(nodes->getInfoVecs().memNodeType1[allocPara_m.bdryNodeCount])); //Ali growthAuxData.randomGrowthSpeedMin_Ori = globalConfigVars.getConfigValue( 
"RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax_Ori = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randomGrowthSpeedMin = growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.randomGrowthSpeedMax_Ori; growthAuxData.grthPrgrCriVal_M_Ori = globalConfigVars.getConfigValue( "GrowthPrgrCriVal").toDouble(); growthAuxData.grthProgrEndCPU = globalConfigVars.getConfigValue( "GrowthPrgrValEnd").toDouble(); } void SceCells::initialize(SceNodes* nodesInput) { nodes = nodesInput; controlPara = nodes->getControlPara(); readMiscPara(); readBioPara(); allocPara = nodesInput->getAllocPara(); // max internal node count must be even number. assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); initCellInfoVecs(); initCellNodeInfoVecs(); initGrowthAuxData(); distributeIsCellRank(); } void SceCells::initialize_M(SceNodes* nodesInput, SceECM *eCMInput, Solver *solver) { std::cout << "Initializing cells ...... " << std::endl; //std::cout.flush(); nodes = nodesInput; //pointer assigned eCMPointerCells=eCMInput ; //pointer assigned solverPointer=solver ; allocPara_m = nodesInput->getAllocParaM(); // max internal node count must be even number. assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); //std::cout << "break point 1 " << std::endl; //std::cout.flush(); controlPara = nodes->getControlPara(); // It copies the controlPara from nstance of class SceNodes to the instance of class of SceCells //std::cout << "break point 2 " << std::endl; //std::cout.flush(); readMiscPara_M(); //std::cout << "break point 3 " << std::endl; //std::cout.flush(); initCellInfoVecs_M(); //std::cout << "break point 4 " << std::endl; //std::cout.flush(); readBioPara(); //std::cout << "break point 5 " << std::endl; //std::cout.flush(); //std::cout << "break point 6 " << std::endl; //std::cout.flush(); initCellNodeInfoVecs_M(); //std::cout << "break point 7 " << std::endl; //std::cout.flush(); initGrowthAuxData_M(); //std::cout << "break point 8 " << std::endl; //std::cout.flush(); } void SceCells::copyInitActiveNodeCount( std::vector<uint>& numOfInitActiveNodesOfCells) { thrust::copy(numOfInitActiveNodesOfCells.begin(), numOfInitActiveNodesOfCells.end(), cellInfoVecs.activeNodeCountOfThisCell.begin()); } void SceCells::allComponentsMove() { adjustNodeVel(); moveNodes(); } /** * Mark cell node as either activdistributeIsActiveInfo()e or inactive. * left part of the node array will be active and right part will be inactive. * the threshold is defined by array activeNodeCountOfThisCell. * e.g. activeNodeCountOfThisCell = {2,3} and maxNodeOfOneCell = 5 */ void SceCells::distributeIsActiveInfo() { //std::cout << "before distribute bdry isActive" << std::endl; distributeBdryIsActiveInfo(); //std::cout << "before distribute profile isActive" << std::endl; distributeProfileIsActiveInfo(); //std::cout << "before distribute ecm isActive" << std::endl; distributeECMIsActiveInfo(); //std::cout << "before distribute cells isActive" << std::endl; distributeCellIsActiveInfo(); } void SceCells::distributeIsCellRank() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingCellEnd( totalNodeCountForActiveCells); std::cerr << "totalNodeCount for active cells " << totalNodeCountForActiveCells << std::endl; //thrust::counting_iterator<uint> countingECMEnd(countingECMEnd); // only computes the cell ranks of cells. 
the rest remain unchanged. thrust::transform(countingBegin, countingCellEnd, nodes->getInfoVecs().nodeCellRank.begin() + allocPara.startPosCells, DivideFunctor(allocPara.maxNodeOfOneCell)); std::cerr << "finished cellRank transformation" << std::endl; } /** * This method computes center of all cells. * more efficient then simply iterating the cell because of parallel reducing. */ void SceCells::computeCenterPos() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); uint totalNumberOfActiveNodes = thrust::reduce( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), isTrue()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalNumberOfActiveNodes, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::equal_to<uint>(), CVec3Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), CVec3Divide()); } /** * 2D version of cell division. * Division process is done by creating two temporary vectors to hold the node information * that are going to divide. * * step 1: based on lengthDifference, expectedLength and growthProgress, * this process determines whether a certain cell is ready to divide and then assign * a boolean value to isDivided. * * step 2. copy those cells that will divide in to the temp vectors created * * step 3. For each cell in the temp vectors, we sort its nodes by its distance to the * corresponding cell center. * This step is not very effcient when the number of cells going to divide is big. 
* but this is unlikely to happen because cells will divide according to external chemical signaling * and each will have different divide progress. * * step 4. copy the right part of each cell of the sorted array (temp1) to left part of each cell of * another array * * step 5. transform isActive vector of both temp1 and temp2, making only left part of each cell active. * * step 6. insert temp2 to the end of the cell array * * step 7. copy temp1 to the previous position of the cell array. * * step 8. add activeCellCount of the system. * * step 9. mark isDivide of all cells to false. */ void SceCells::divide2DSimplified() { bool isDivisionPresent = decideIfGoingToDivide(); if (!isDivisionPresent) { return; } copyCellsPreDivision(); sortNodesAccordingToDist(); copyLeftAndRightToSeperateArrays(); transformIsActiveArrayOfBothArrays(); addSecondArrayToCellArray(); copyFirstArrayToPreviousPos(); updateActiveCellCount(); markIsDivideFalse(); } bool SceCells::decideIfGoingToDivide() { // step 1 thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isDividing.begin(), cellInfoVecs.growthProgress.begin())), CompuIsDivide(miscPara.isDivideCriticalRatio, allocPara.maxNodeOfOneCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. 
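	// (With an unsigned initial value of 0 and the default plus operator,
	// thrust::reduce sums the boolean flags as 0/1, so toBeDivideCount is
	// simply the number of cells currently marked for division.)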
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } void SceCells::copyCellsPreDivision() { // step 2 : copy all cell rank and distance to its corresponding center with divide flag = 1 totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara.maxNodeOfOneCell; divAuxData.tmpIsActiveHold1 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpDistToCenter1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRankHold1 = thrust::device_vector<uint>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellTypes = thrust::device_vector<SceNodeType>( divAuxData.nodeStorageCount); divAuxData.tmpIsActiveHold2 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpDistToCenter2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRankHold1.begin(), divAuxData.tmpDistToCenter1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin(), divAuxData.tmpCellTypes.begin())), isTrue()); } /** * performance wise, this implementation is not the best because I can use only one sort_by_key * with speciialized comparision operator. However, This implementation is more robust and won't * compromise performance too much. 
*/ void SceCells::sortNodesAccordingToDist() { //step 3 for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { thrust::sort_by_key( divAuxData.tmpDistToCenter1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpDistToCenter1.begin() + (i + 1) * allocPara.maxNodeOfOneCell, thrust::make_zip_iterator( thrust::make_tuple( divAuxData.tmpXValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpYValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpZValueHold1.begin() + i * allocPara.maxNodeOfOneCell))); } } /** * scatter_if() is a thrust function. * inputIter1 first, * inputIter1 last, * inputIter2 map, * inputIter3 stencil * randomAccessIter output */ void SceCells::copyLeftAndRightToSeperateArrays() { //step 4. thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), make_transform_iterator(countingBegin, LeftShiftFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, IsRightSide(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold2.begin(), divAuxData.tmpYValueHold2.begin(), divAuxData.tmpZValueHold2.begin()))); } void SceCells::transformIsActiveArrayOfBothArrays() { thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold1.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold2.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); if (divAuxData.toBeDivideCount != 0) { std::cout << "before insert, active cell count in nodes:" << nodes->getAllocPara().currentActiveCellCount << std::endl; } } void SceCells::addSecondArrayToCellArray() { /// step 6. 
call SceNodes function to add newly divided cells nodes->addNewlyDividedCells(divAuxData.tmpXValueHold2, divAuxData.tmpYValueHold2, divAuxData.tmpZValueHold2, divAuxData.tmpIsActiveHold2, divAuxData.tmpCellTypes); } void SceCells::copyFirstArrayToPreviousPos() { thrust::scatter( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.end(), divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple(countingBegin, divAuxData.tmpCellRankHold1.begin())), CompuPos(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells))); /** * after dividing, the cell should resume the initial * (1) node count, which defaults to be half size of max node count * (2) growth progress, which defaults to 0 * (3) last check point, which defaults to 0 */ thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)), thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)) + allocPara.currentActiveCellCount, countingBegin, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), isTrue()); // TODO: combine this one with the previous scatter_if to improve efficiency. thrust::fill( cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount + divAuxData.toBeDivideCount, allocPara.maxNodeOfOneCell / 2); } void SceCells::updateActiveCellCount() { allocPara.currentActiveCellCount = allocPara.currentActiveCellCount + divAuxData.toBeDivideCount; NodeAllocPara para = nodes->getAllocPara(); para.currentActiveCellCount = allocPara.currentActiveCellCount; nodes->setAllocPara(para); } void SceCells::markIsDivideFalse() { thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, false); } void SceCells::readMiscPara() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. 
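	// Worked example (the value 20 is illustrative, not taken from any config
	// file): with MaxNodePerCell = 20 the threshold below is
	// 1.0 / (20 - 10) + epsilon ~= 0.1, i.e. one additional node is scheduled
	// roughly every 10% of growth progress, filling the initially inactive
	// half of the node slots by the time progress reaches 1.0.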
int maxNodeOfOneCell = globalConfigVars.getConfigValue("MaxNodePerCell").toInt(); miscPara.growThreshold = 1.0 / (maxNodeOfOneCell - maxNodeOfOneCell / 2) + epsilon; } void SceCells::readMiscPara_M() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. int maxIntnlNodePerCell = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); miscPara.growThreshold = 1.0 / (maxIntnlNodePerCell - maxIntnlNodePerCell / 2) + epsilon; miscPara.prolifDecayCoeff = globalConfigVars.getConfigValue( "ProlifDecayCoeff").toDouble(); } void SceCells::readBioPara() { if (controlPara.simuType != Disc_M) { bioPara.cellInitLength = globalConfigVars.getConfigValue( "CellInitLength").toDouble(); std::cout << "break point 1 " << bioPara.cellInitLength << std::endl; std::cout.flush(); bioPara.cellFinalLength = globalConfigVars.getConfigValue( "CellFinalLength").toDouble(); std::cout << "break point 2 " << bioPara.cellFinalLength << std::endl; std::cout.flush(); bioPara.elongationCoefficient = globalConfigVars.getConfigValue( "ElongateCoefficient").toDouble(); std::cout << "break point 3 " << bioPara.elongationCoefficient << std::endl; std::cout.flush(); } if (controlPara.simuType == Beak) { std::cout << "break point 4 " << std::endl; std::cout.flush(); bioPara.chemoCoefficient = globalConfigVars.getConfigValue( "ChemoCoefficient").toDouble(); } //std::cin >> jj; } void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } /** * To run all the cell level logics. * First step we got center positions of cells. * Grow. */ void SceCells::runAllCellLevelLogicsDisc(double dt) { this->dt = dt; //std::cerr << "enter run all cell level logics" << std::endl; computeCenterPos(); //std::cerr << "after compute center position." << std::endl; if (nodes->getControlPara().controlSwitchs.stab == OFF) { growAtRandom(dt); //grow2DTwoRegions(dt, region1, region2); //std::cerr << "after grow cells" << std::endl; //distributeIsActiveInfo(); //std::cerr << "after distribute is active info." << std::endl; divide2DSimplified(); //std::cerr << "after divide 2D simplified." << std::endl; distributeIsActiveInfo(); //std::cerr << "after distribute is active info." 
<< std::endl; distributeCellGrowthProgress(); } allComponentsMove(); //std::cerr << "after all components move." << std::endl; } //Ali void SceCells::runAllCellLogicsDisc_M(double dt) { void SceCells::runAllCellLogicsDisc_M(double & dt, double Damp_Coef, double InitTimeStage, double timeRatio, double timeRatio_Crit_actomyo, double timeRatio_Crit_ECM, double timeRatio_Crit_Division, double volume_Increase_Target_Ratio, double volume_Increase_Scale, double postDivision_restorationRateScale, int cycle, double distFromNucleus_max, double distFromNucleus_min, double distFromNucleus_normalMax1,double distFromNucleus_normalMax2,double distFromNucleus_normalMax3, double distFromNucleus_normalMax_apical1, double distFromNucleus_normalMax_apical2, double distFromNucleus_normalMax_apical3, double percentage_before_timeRatio_Crit_Division_scaling, double growthProgressSpeed, int maxApicalBasalNodeNum, double maxLengthToAddMemNodes, double mitoRndActomyoStrengthScaling, double thresholdToIntroduceNewCell, double mitoticThreshold) { //Ali // double time_Ratio = current_Time/total_Time; // if (timeRatio == timeRatio_Crit_Division){ // std::cout<<"timeRatio = "<<timeRatio<<std::endl; // } // if (cycle < 0){ // divAuxData.cellRank_division = -1; // divAuxData.cellRank_division2 = -1;//allocPara_m.currentActiveCellCount; // } // else if (cycle >= 10){ // divAuxData.cellRank_division = -1; // divAuxData.cellRank_division2 = -1;//allocPara_m.currentActiveCellCount; // } // if (cycle == 0 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 31; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 1 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 25;//33; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 2 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 6;//27; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 3 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 10;//29; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 4 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 61;//35; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 5 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 30;//86; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 6 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 87; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 7 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 5;//88; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 8 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 42;//89; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else if (cycle == 9 && timeRatio < timeRatio_Crit_Division){ // divAuxData.cellRank_division = 37;//90; // divAuxData.cellRank_division2 = allocPara_m.currentActiveCellCount; // } // else{ // divAuxData.cellRank_division = -1; // divAuxData.cellRank_division2 = -1;//allocPara_m.currentActiveCellCount; // } #ifdef debugModeECM cudaEvent_t start1, start2, start3, start4, start5, start6, start7, start8, start9, start10, start11, start12, 
start13, stop; float elapsedTime1, elapsedTime2, elapsedTime3, elapsedTime4, elapsedTime5, elapsedTime6, elapsedTime7 , elapsedTime8 ; float elapsedTime9, elapsedTime10, elapsedTime11, elapsedTime12, elapsedTime13 ; cudaEventCreate(&start1); cudaEventCreate(&start2); cudaEventCreate(&start3); cudaEventCreate(&start4); cudaEventCreate(&start5); cudaEventCreate(&start6); cudaEventCreate(&start7); cudaEventCreate(&start8); cudaEventCreate(&start9); cudaEventCreate(&start10); cudaEventCreate(&start11); cudaEventCreate(&start12); cudaEventCreate(&start13); cudaEventCreate(&stop); cudaEventRecord(start1, 0); #endif // std::cout << " *** 1 ***" << endl; this->dt = dt; this->Damp_Coef=Damp_Coef ; //Ali this->InitTimeStage=InitTimeStage ; //A & A growthAuxData.prolifDecay = exp(-curTime * miscPara.prolifDecayCoeff); //cout<< "Current Time in simulation is: "<<curTime <<endl; growthAuxData.randomGrowthSpeedMin = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMax_Ori; bool cellPolar=true ; bool subCellPolar= true ; // std::cout << " *** 2 ***" << endl; if (curTime>500000) { // eCMPointerCells->SetIfECMIsRemoved(false) ; // isBasalActinPresent=false ; // nodes->SetApicalAdhPresence(true) ; } if (curTime==InitTimeStage) { lastPrintNucleus=10000000 ; //just a big number outputFrameNucleus=0 ; // computeInternalAvgPos_M(); nodes->isInitPhase=false ; // This bool variable is not active in the code anymore string uniqueSymbolOutput = globalConfigVars.getConfigValue("UniqueSymbol").toString(); std::string cSVFileName = "EnergyExportCell_" + uniqueSymbolOutput + ".CSV"; ofstream EnergyExportCell ; EnergyExportCell.open(cSVFileName.c_str() ); EnergyExportCell <<"curTime"<<","<<"totalMembrLinSpringEnergyCell" << "," <<"totalMembrBendSpringEnergyCell" <<"," << "totalNodeIIEnergyCell"<<"," <<"totalNodeIMEnergyCell"<<","<<"totalNodeEnergyCell"<< std::endl; // Adding cell cycle timer information here std::vector<double> growthProgressReinitialize; growthProgressReinitialize.push_back(-9999.9);//0.0163);//Cell0 growthProgressReinitialize.push_back(-9999.9);//0.0181);//Cell1 // Ver7 // growthProgressReinitialize.push_back( -99999.9); // growthProgressReinitialize.push_back( -99999.9); // growthProgressReinitialize.push_back( -0.58); // growthProgressReinitialize.push_back( -2.69); // growthProgressReinitialize.push_back( -1.2); // growthProgressReinitialize.push_back( 0.1100); // growthProgressReinitialize.push_back( -2.978); // growthProgressReinitialize.push_back( -1.0070); // growthProgressReinitialize.push_back( -2.8052); // growthProgressReinitialize.push_back( -2.0354); // growthProgressReinitialize.push_back( -1.1824); // growthProgressReinitialize.push_back( -0.6577); // growthProgressReinitialize.push_back( 0.6080); // growthProgressReinitialize.push_back( -1.0282); // growthProgressReinitialize.push_back( -0.2033); // growthProgressReinitialize.push_back( -2.34); // growthProgressReinitialize.push_back( 0.0127); // growthProgressReinitialize.push_back( -0.2035); // growthProgressReinitialize.push_back( -1.299); // growthProgressReinitialize.push_back( -2.703); // growthProgressReinitialize.push_back( -1.562); // growthProgressReinitialize.push_back( -2.750); // growthProgressReinitialize.push_back( -0.3286); // growthProgressReinitialize.push_back( -2.083); // growthProgressReinitialize.push_back( -2.79); // growthProgressReinitialize.push_back( -1.1567); // 
growthProgressReinitialize.push_back( -0.5034); // growthProgressReinitialize.push_back( -1.9003); // growthProgressReinitialize.push_back( 0.4964); // growthProgressReinitialize.push_back( -0.4520); // growthProgressReinitialize.push_back( -2.002); // growthProgressReinitialize.push_back( -1.000); // growthProgressReinitialize.push_back( -1.880); // growthProgressReinitialize.push_back( 0.3719); // growthProgressReinitialize.push_back( -0.7133); // growthProgressReinitialize.push_back( -1.172); // growthProgressReinitialize.push_back( 0.0251); // growthProgressReinitialize.push_back( -2.323); // growthProgressReinitialize.push_back( -1.960); // growthProgressReinitialize.push_back( -0.1294); // growthProgressReinitialize.push_back( 0.2848); // growthProgressReinitialize.push_back( -2.912); // growthProgressReinitialize.push_back( 0.2526); // growthProgressReinitialize.push_back( -2.165); // growthProgressReinitialize.push_back( -1.031); // growthProgressReinitialize.push_back( -0.7257); // growthProgressReinitialize.push_back( -2.087); // growthProgressReinitialize.push_back( -1.018); // growthProgressReinitialize.push_back( 0.0391); // growthProgressReinitialize.push_back( -2.1332); // growthProgressReinitialize.push_back( -3.2330); // growthProgressReinitialize.push_back( -0.3449); // growthProgressReinitialize.push_back( -2.0334); // growthProgressReinitialize.push_back( -0.0101); // growthProgressReinitialize.push_back( 0.4452); // growthProgressReinitialize.push_back( -2.013); // growthProgressReinitialize.push_back( 0.0002); // growthProgressReinitialize.push_back( -1.048); // growthProgressReinitialize.push_back( 0.2862); // growthProgressReinitialize.push_back( -9999.9); // growthProgressReinitialize.push_back( -9999.9); growthProgressReinitialize.push_back( -9999.9); //Cell 2 growthProgressReinitialize.push_back( -9999.9); //Cell 3 // //Ver8 // std::cout<<"growthProgress initial profile Ver. 
8"<<std::endl; // growthProgressReinitialize.push_back( -1.0825); // growthProgressReinitialize.push_back( -3.4494); // growthProgressReinitialize.push_back( -2.0672); // growthProgressReinitialize.push_back( -2.8107); // growthProgressReinitialize.push_back( -0.1243); // growthProgressReinitialize.push_back( -2.1773); // growthProgressReinitialize.push_back( -1.2537); // growthProgressReinitialize.push_back( -2.7960); // growthProgressReinitialize.push_back( -0.9416); // growthProgressReinitialize.push_back( -2.3824); // growthProgressReinitialize.push_back( -0.7202); // growthProgressReinitialize.push_back( -1.5852); // growthProgressReinitialize.push_back( -3.1438); // growthProgressReinitialize.push_back( -2.5268); // growthProgressReinitialize.push_back( 0.3817); // growthProgressReinitialize.push_back( -2.8524); // growthProgressReinitialize.push_back( 0.0097); // growthProgressReinitialize.push_back( -1.2120); // growthProgressReinitialize.push_back( 0.7336); // growthProgressReinitialize.push_back( -3.1678); // growthProgressReinitialize.push_back( -1.6186); // growthProgressReinitialize.push_back( -3.0467); // growthProgressReinitialize.push_back( 0.5881); // growthProgressReinitialize.push_back( -3.4803); // growthProgressReinitialize.push_back( -0.2066); // growthProgressReinitialize.push_back( -3.1411); // growthProgressReinitialize.push_back( -1.8009); // growthProgressReinitialize.push_back( -2.3956); // growthProgressReinitialize.push_back( -0.0997); // growthProgressReinitialize.push_back( -1.6665); // growthProgressReinitialize.push_back( 0.3703); // growthProgressReinitialize.push_back( -2.7272); // growthProgressReinitialize.push_back( 0.1945); // growthProgressReinitialize.push_back( -1.0363); // growthProgressReinitialize.push_back( -2.8839); // growthProgressReinitialize.push_back( 0.1254); // growthProgressReinitialize.push_back( -0.8563); // growthProgressReinitialize.push_back( -2.0085); // growthProgressReinitialize.push_back( -1.3187); // growthProgressReinitialize.push_back( -3.1771); // growthProgressReinitialize.push_back( -2.4804); // growthProgressReinitialize.push_back( -1.7266); // growthProgressReinitialize.push_back( -3.2890); // growthProgressReinitialize.push_back( 0.3365); // growthProgressReinitialize.push_back( -1.4138); // growthProgressReinitialize.push_back( -2.0647); // growthProgressReinitialize.push_back( 0.3252); // growthProgressReinitialize.push_back( -1.9307); // growthProgressReinitialize.push_back( -3.0274); // growthProgressReinitialize.push_back( -0.1839); // growthProgressReinitialize.push_back( -1.8436); // growthProgressReinitialize.push_back( -2.4728); // growthProgressReinitialize.push_back( -1.7834); // growthProgressReinitialize.push_back( -3.0901); // growthProgressReinitialize.push_back( 0.5037); // growthProgressReinitialize.push_back( -1.0554); // growthProgressReinitialize.push_back( -3.2459); // //Ver9 // std::cout<<"growthProgress initial profile Ver. 
9"<<std::endl; // growthProgressReinitialize.push_back(-1.3519); // growthProgressReinitialize.push_back(-0.2639); // growthProgressReinitialize.push_back(-3.1180); // growthProgressReinitialize.push_back(0.7373); // growthProgressReinitialize.push_back(-1.8542); // growthProgressReinitialize.push_back(-0.4155); // growthProgressReinitialize.push_back(-3.1281); // growthProgressReinitialize.push_back(0.6909); // growthProgressReinitialize.push_back(-1.9936); // growthProgressReinitialize.push_back(-0.6243); // growthProgressReinitialize.push_back(-3.1490); // growthProgressReinitialize.push_back(0.7337); // growthProgressReinitialize.push_back(-1.9932); // growthProgressReinitialize.push_back(-0.4141); // growthProgressReinitialize.push_back(-3.4583); // growthProgressReinitialize.push_back(0.7396); // growthProgressReinitialize.push_back(-1.4360); // growthProgressReinitialize.push_back(-0.3950); // growthProgressReinitialize.push_back(-3.3643); // growthProgressReinitialize.push_back(0.6351); // growthProgressReinitialize.push_back(-1.9657); // growthProgressReinitialize.push_back(-0.8638); // growthProgressReinitialize.push_back(-3.4182); // growthProgressReinitialize.push_back(0.6429); // growthProgressReinitialize.push_back(-2.3591); // growthProgressReinitialize.push_back(-0.3721); // growthProgressReinitialize.push_back(-3.4084); // growthProgressReinitialize.push_back(0.6688); // growthProgressReinitialize.push_back(-2.2819); // growthProgressReinitialize.push_back(-0.9605); // growthProgressReinitialize.push_back(-3.3901); // growthProgressReinitialize.push_back(0.4828); // growthProgressReinitialize.push_back(-2.2890); // growthProgressReinitialize.push_back(-0.6388); // growthProgressReinitialize.push_back(-3.4209); // growthProgressReinitialize.push_back(0.5414); // growthProgressReinitialize.push_back(-1.7997); // growthProgressReinitialize.push_back(-0.5446); // growthProgressReinitialize.push_back(-2.8349); // growthProgressReinitialize.push_back(0.5172); // growthProgressReinitialize.push_back(-1.5476); // growthProgressReinitialize.push_back(-0.4978); // growthProgressReinitialize.push_back(-3.1851); // growthProgressReinitialize.push_back(0.5574); // growthProgressReinitialize.push_back(-1.6700); // growthProgressReinitialize.push_back(-0.6098); // growthProgressReinitialize.push_back(-3.3590); // growthProgressReinitialize.push_back(0.5648); // growthProgressReinitialize.push_back(-1.9684); // growthProgressReinitialize.push_back(-0.5266); // growthProgressReinitialize.push_back(-2.9763); // growthProgressReinitialize.push_back(0.5215); // growthProgressReinitialize.push_back(-1.8311); // growthProgressReinitialize.push_back(-0.6464); // growthProgressReinitialize.push_back(-3.2580); // growthProgressReinitialize.push_back(0.6640); // growthProgressReinitialize.push_back(-2.0480); // //Ver10 // std::cout<<"Growth-progress profile Ver 10 is used"<<std::endl; // growthProgressReinitialize.push_back( -1.9657); // growthProgressReinitialize.push_back( -0.9376); // growthProgressReinitialize.push_back( 0.3190); // growthProgressReinitialize.push_back( -2.3226); // growthProgressReinitialize.push_back( -0.9976); // growthProgressReinitialize.push_back( 0.6312); // growthProgressReinitialize.push_back( -2.4786); // growthProgressReinitialize.push_back( -0.4733); // growthProgressReinitialize.push_back( 0.6356); // growthProgressReinitialize.push_back( -2.4616); // growthProgressReinitialize.push_back( -1.3734); // growthProgressReinitialize.push_back( -0.0702); // 
growthProgressReinitialize.push_back( -2.4089); // growthProgressReinitialize.push_back( -1.4015); // growthProgressReinitialize.push_back( -0.0385); // growthProgressReinitialize.push_back( -2.4974); // growthProgressReinitialize.push_back( -1.2876); // growthProgressReinitialize.push_back( 0.1130); // growthProgressReinitialize.push_back( -2.4161); // growthProgressReinitialize.push_back( -1.3892); // growthProgressReinitialize.push_back( -0.0828); // growthProgressReinitialize.push_back( -2.4240); // growthProgressReinitialize.push_back( -1.1015); // growthProgressReinitialize.push_back( 0.0612); // growthProgressReinitialize.push_back( -2.2057); // growthProgressReinitialize.push_back( -1.1241); // growthProgressReinitialize.push_back( 0.7000); // growthProgressReinitialize.push_back( -2.2458); // growthProgressReinitialize.push_back( -1.2208); // growthProgressReinitialize.push_back( -0.1874); // growthProgressReinitialize.push_back( -2.2313); // growthProgressReinitialize.push_back( -1.2195); // growthProgressReinitialize.push_back( 0.1352); // growthProgressReinitialize.push_back( -2.3796); // growthProgressReinitialize.push_back( -0.9151); // growthProgressReinitialize.push_back( 0.7106); // growthProgressReinitialize.push_back( -1.9618); // growthProgressReinitialize.push_back( -0.7738); // growthProgressReinitialize.push_back( 0.4064); // growthProgressReinitialize.push_back( -1.7924); // growthProgressReinitialize.push_back( -0.6859); // growthProgressReinitialize.push_back( 0.5891); // growthProgressReinitialize.push_back( -1.9121); // growthProgressReinitialize.push_back( -0.5693); // growthProgressReinitialize.push_back( 0.7430); // growthProgressReinitialize.push_back( -2.2218); // growthProgressReinitialize.push_back( -1.0864); // growthProgressReinitialize.push_back( 0.1956); // growthProgressReinitialize.push_back( -2.1195); // growthProgressReinitialize.push_back( -0.8659); // growthProgressReinitialize.push_back( 0.2405); // growthProgressReinitialize.push_back( -2.4792); // growthProgressReinitialize.push_back( -1.1303); // growthProgressReinitialize.push_back( 0.3634); // growthProgressReinitialize.push_back( -2.2099); // growthProgressReinitialize.push_back( -1.0004); // growthProgressReinitialize.push_back( 0.2268); // growthProgressReinitialize.push_back( -9999.9); // growthProgressReinitialize.push_back( -9999.9); // growthProgressReinitialize.push_back(-9999.9);//0.0140); // growthProgressReinitialize.push_back(-9999.9);//0.0178); // growthProgressReinitialize.push_back(-9999.9);//0.0192); // growthProgressReinitialize.push_back(-9999.9);//0.0109); // growthProgressReinitialize.push_back(-9999.9);//0.0028); // growthProgressReinitialize.push_back(-9999.9);//0.0030); // growthProgressReinitialize.push_back(-9999.9);//0.0052); // growthProgressReinitialize.push_back(-9999.9);//0.0168); // growthProgressReinitialize.push_back(-9999.9);//0.0051); // growthProgressReinitialize.push_back(-9999.9);//0.0163); // growthProgressReinitialize.push_back(-9999.9);//0.0049); // growthProgressReinitialize.push_back(-9999.9);//0.0186); // growthProgressReinitialize.push_back(-9999.9);//0.0070); // growthProgressReinitialize.push_back(-9999.9);//0.0039); // growthProgressReinitialize.push_back(-9999.9);//0.0050); // growthProgressReinitialize.push_back(-9999.9);//0.0123); // growthProgressReinitialize.push_back(-9999.9);//0.0095); // growthProgressReinitialize.push_back(-9999.9);//0.0070); // growthProgressReinitialize.push_back(-9999.9);//0.0166); // 
growthProgressReinitialize.push_back(-9999.9);//0.0117); // growthProgressReinitialize.push_back(-9999.9);//0.0110); // growthProgressReinitialize.push_back(-9999.9);//0.0183); // growthProgressReinitialize.push_back(-9999.9);//0.0057); // for (int w = 0; w < allocPara_m.maxCellCount; w++){ // if (w < allocPara_m.currentActiveCellCount){ // cellInfoVecs.growthProgress[w] = growthProgressReinitialize[w]; // } // else{ // cellInfoVecs.growthProgress[w] = 0.0; // } // } // //Ver11 // std::cout<<"Growth-progress profile Ver 11 is used"<<std::endl; // growthProgressReinitialize.push_back( 0.8821); // growthProgressReinitialize.push_back( -0.5595); // growthProgressReinitialize.push_back( -0.0401); // growthProgressReinitialize.push_back( 0.4962); // growthProgressReinitialize.push_back( -0.6362); // growthProgressReinitialize.push_back( -0.0459); // growthProgressReinitialize.push_back( 0.6200); // growthProgressReinitialize.push_back( -0.6327); // growthProgressReinitialize.push_back( -0.0860); // growthProgressReinitialize.push_back( 0.6928); // growthProgressReinitialize.push_back( -0.7430); // growthProgressReinitialize.push_back( -0.1503); // growthProgressReinitialize.push_back( 0.4501); // growthProgressReinitialize.push_back( -0.6755); // growthProgressReinitialize.push_back( -0.1654); // growthProgressReinitialize.push_back( 0.6631); // growthProgressReinitialize.push_back( -0.7269); // growthProgressReinitialize.push_back( -0.0736); // growthProgressReinitialize.push_back( 0.6441); // growthProgressReinitialize.push_back( -0.5833); // growthProgressReinitialize.push_back( 0.1423); // growthProgressReinitialize.push_back( 0.6433); // growthProgressReinitialize.push_back( -0.3941); // growthProgressReinitialize.push_back( 0.1151); // growthProgressReinitialize.push_back( 0.7180); // growthProgressReinitialize.push_back( -0.6606); // growthProgressReinitialize.push_back( 0.0325); // growthProgressReinitialize.push_back( 0.8795); // growthProgressReinitialize.push_back( -0.6248); // growthProgressReinitialize.push_back( 0.0403); // growthProgressReinitialize.push_back( 0.7261); // growthProgressReinitialize.push_back( -0.4992); // growthProgressReinitialize.push_back( 0.0718); // growthProgressReinitialize.push_back( 0.6641); // growthProgressReinitialize.push_back( -0.4828); // growthProgressReinitialize.push_back( 0.0918); // growthProgressReinitialize.push_back( 0.7225); // growthProgressReinitialize.push_back( -0.5154); // growthProgressReinitialize.push_back( 0.2171); // growthProgressReinitialize.push_back( 0.7240); // growthProgressReinitialize.push_back( -0.5883); // growthProgressReinitialize.push_back( 0.1415); // growthProgressReinitialize.push_back( 0.7030); // growthProgressReinitialize.push_back( -0.5916); // growthProgressReinitialize.push_back( 0.1664); // growthProgressReinitialize.push_back( 0.6865); // growthProgressReinitialize.push_back( -0.3889); // growthProgressReinitialize.push_back( 0.1533); // growthProgressReinitialize.push_back( 0.6678); // growthProgressReinitialize.push_back( -0.4366); // growthProgressReinitialize.push_back( 0.1610); // growthProgressReinitialize.push_back( 0.7406); // growthProgressReinitialize.push_back( -0.4220); // growthProgressReinitialize.push_back( 0.1918); // growthProgressReinitialize.push_back( 0.7091); // growthProgressReinitialize.push_back( -0.4620); // growthProgressReinitialize.push_back( 0.0381); // growthProgressReinitialize.push_back( -9999.9); // growthProgressReinitialize.push_back( -9999.9); //Ver11 
std::cout<<"Growth-progress profile Ver 11 is used"<<std::endl; growthProgressReinitialize.push_back( -9999.9);//Cell4 growthProgressReinitialize.push_back( -0.5595);//Cell5 growthProgressReinitialize.push_back( -0.0401);//Cell6 growthProgressReinitialize.push_back( 0.4962);//Cell7 growthProgressReinitialize.push_back( -0.6362);//Cell8 growthProgressReinitialize.push_back( -0.0459);//Cell9 growthProgressReinitialize.push_back( 0.6200);//Cell10 growthProgressReinitialize.push_back( -0.6327);//Cell11 growthProgressReinitialize.push_back( -0.0860);//Cell12 growthProgressReinitialize.push_back( 0.6928);//Cell13 growthProgressReinitialize.push_back( -0.7430);//Cell14 growthProgressReinitialize.push_back( -0.1503);//Cell15 growthProgressReinitialize.push_back( 0.4501);//Cell16 growthProgressReinitialize.push_back( -0.6755);//Cell17 growthProgressReinitialize.push_back( -0.1654);//Cell18 growthProgressReinitialize.push_back( 0.6631);//Cell19 growthProgressReinitialize.push_back( -0.7269);//Cell20 growthProgressReinitialize.push_back( -0.0736);//Cell21 growthProgressReinitialize.push_back( 0.6441);//Cell22 growthProgressReinitialize.push_back( -0.5833);//Cell23 growthProgressReinitialize.push_back( 0.1423);//Cell24 growthProgressReinitialize.push_back( 0.6433);//Cell25 growthProgressReinitialize.push_back( -0.3941);//Cell26 growthProgressReinitialize.push_back( 0.1151);//Cell27 growthProgressReinitialize.push_back( 0.7180);//Cell28 growthProgressReinitialize.push_back( -0.6606);//Cell29 growthProgressReinitialize.push_back( 0.0325);//Cell30 growthProgressReinitialize.push_back( 0.8900);//Cell31 growthProgressReinitialize.push_back( -0.6248);//Cell32 growthProgressReinitialize.push_back( 0.0403);//Cell33 growthProgressReinitialize.push_back( 0.7261);//Cell34 growthProgressReinitialize.push_back( -0.4992);//Cell35 growthProgressReinitialize.push_back( 0.0718);//Cell36 growthProgressReinitialize.push_back( 0.6641);//Cell37 growthProgressReinitialize.push_back( -0.4828);//Cell38 growthProgressReinitialize.push_back( 0.0918);//Cell39 growthProgressReinitialize.push_back( 0.7225);//Cell40 growthProgressReinitialize.push_back( -0.5154);//Cell41 growthProgressReinitialize.push_back( 0.2171);//Cell42 growthProgressReinitialize.push_back( 0.7240);//Cell43 growthProgressReinitialize.push_back( -0.5883);//Cell44 growthProgressReinitialize.push_back( 0.1415);//Cell45 growthProgressReinitialize.push_back( 0.7030);//Cell46 growthProgressReinitialize.push_back( -0.5916);//Cell47 growthProgressReinitialize.push_back( 0.1664);//Cell48 growthProgressReinitialize.push_back( 0.6865);//Cell49 growthProgressReinitialize.push_back( -0.3889);//Cell50 growthProgressReinitialize.push_back( 0.1533);//Cell51 growthProgressReinitialize.push_back( 0.6678);//Cell52 growthProgressReinitialize.push_back( -0.4366);//Cell53 growthProgressReinitialize.push_back( 0.1610);//Cell54 growthProgressReinitialize.push_back( 0.7406);//Cell55 growthProgressReinitialize.push_back( -0.4220);//Cell56 growthProgressReinitialize.push_back( 0.1918);//Cell57 growthProgressReinitialize.push_back( 0.7091);//Cell58 growthProgressReinitialize.push_back( -0.4620);//Cell59 growthProgressReinitialize.push_back( 0.0381);//Cell60 growthProgressReinitialize.push_back( -9999.9);//Cell61 growthProgressReinitialize.push_back( -9999.9);//Cell62 growthProgressReinitialize.push_back(-9999.9);//0.0140); growthProgressReinitialize.push_back(-9999.9);//0.0178); growthProgressReinitialize.push_back(-9999.9);//0.0192); 
growthProgressReinitialize.push_back(-9999.9);//0.0109); growthProgressReinitialize.push_back(-9999.9);//0.0028); growthProgressReinitialize.push_back(-9999.9);//0.0030); growthProgressReinitialize.push_back(-9999.9);//0.0052); growthProgressReinitialize.push_back(-9999.9);//0.0168); growthProgressReinitialize.push_back(-9999.9);//0.0051); growthProgressReinitialize.push_back(-9999.9);//0.0163); growthProgressReinitialize.push_back(-9999.9);//0.0049); growthProgressReinitialize.push_back(-9999.9);//0.0186); growthProgressReinitialize.push_back(-9999.9);//0.0070); growthProgressReinitialize.push_back(-9999.9);//0.0039); growthProgressReinitialize.push_back(-9999.9);//0.0050); growthProgressReinitialize.push_back(-9999.9);//0.0123); growthProgressReinitialize.push_back(-9999.9);//0.0095); growthProgressReinitialize.push_back(-9999.9);//0.0070); growthProgressReinitialize.push_back(-9999.9);//0.0166); growthProgressReinitialize.push_back(-9999.9);//0.0117); growthProgressReinitialize.push_back(-9999.9);//0.0110); growthProgressReinitialize.push_back(-9999.9);//0.0183); growthProgressReinitialize.push_back(-9999.9);//0.0057); for (int w = 0; w < allocPara_m.maxCellCount; w++){ if (w < allocPara_m.currentActiveCellCount){ cellInfoVecs.growthProgress[w] = growthProgressReinitialize[w]; } else{ cellInfoVecs.growthProgress[w] = 0.0; } } // Adding actin level (number of contractile springs) information here std::vector<double> distFromNucleus_normalMaxVec; distFromNucleus_normalMaxVec.push_back(-9999.9999);//CellID0 distFromNucleus_normalMaxVec.push_back(-9999.9999);//CellID1 distFromNucleus_normalMaxVec.push_back(0.197835294); distFromNucleus_normalMaxVec.push_back(0.197882353); distFromNucleus_normalMaxVec.push_back(0.216423529); distFromNucleus_normalMaxVec.push_back(0.2248 ); distFromNucleus_normalMaxVec.push_back(0.223623529); distFromNucleus_normalMaxVec.push_back(0.248752941); distFromNucleus_normalMaxVec.push_back(0.251717647); distFromNucleus_normalMaxVec.push_back(0.281788235); distFromNucleus_normalMaxVec.push_back(0.284329412); distFromNucleus_normalMaxVec.push_back(0.284094118); distFromNucleus_normalMaxVec.push_back(0.282776471); distFromNucleus_normalMaxVec.push_back(0.2968 ); distFromNucleus_normalMaxVec.push_back(0.299294118); distFromNucleus_normalMaxVec.push_back(0.3 ); distFromNucleus_normalMaxVec.push_back(0.292658824); distFromNucleus_normalMaxVec.push_back(0.292329412); distFromNucleus_normalMaxVec.push_back(0.284847059); distFromNucleus_normalMaxVec.push_back(0.292564706); distFromNucleus_normalMaxVec.push_back(0.274823529); distFromNucleus_normalMaxVec.push_back(0.242776471); distFromNucleus_normalMaxVec.push_back(0.247529412); distFromNucleus_normalMaxVec.push_back(0.254682353); distFromNucleus_normalMaxVec.push_back(0.224658824); distFromNucleus_normalMaxVec.push_back(0.234776471); distFromNucleus_normalMaxVec.push_back(0.263105882); distFromNucleus_normalMaxVec.push_back(0.236988235); distFromNucleus_normalMaxVec.push_back(0.216423529); distFromNucleus_normalMaxVec.push_back(0.240658824); distFromNucleus_normalMaxVec.push_back(0.196941176); distFromNucleus_normalMaxVec.push_back(0.2184 ); distFromNucleus_normalMaxVec.push_back(0.232094118); distFromNucleus_normalMaxVec.push_back(0.238070588); distFromNucleus_normalMaxVec.push_back(0.248329412); distFromNucleus_normalMaxVec.push_back(0.244611765); distFromNucleus_normalMaxVec.push_back(0.230070588); distFromNucleus_normalMaxVec.push_back(0.221411765); distFromNucleus_normalMaxVec.push_back(0.2352 ); 
distFromNucleus_normalMaxVec.push_back(0.198305882); distFromNucleus_normalMaxVec.push_back(0.201552941); distFromNucleus_normalMaxVec.push_back(0.2176 ); distFromNucleus_normalMaxVec.push_back(0.210964706); distFromNucleus_normalMaxVec.push_back(0.233082353); distFromNucleus_normalMaxVec.push_back(0.228141176); distFromNucleus_normalMaxVec.push_back(0.236564706); distFromNucleus_normalMaxVec.push_back(0.244141176); distFromNucleus_normalMaxVec.push_back(0.2232 ); distFromNucleus_normalMaxVec.push_back(0.229741176); distFromNucleus_normalMaxVec.push_back(0.220847059); distFromNucleus_normalMaxVec.push_back(0.214164706); distFromNucleus_normalMaxVec.push_back(0.212376471); distFromNucleus_normalMaxVec.push_back(0.218870588); distFromNucleus_normalMaxVec.push_back(0.245129412); distFromNucleus_normalMaxVec.push_back(0.230964706); distFromNucleus_normalMaxVec.push_back(0.252047059); distFromNucleus_normalMaxVec.push_back(0.243576471); distFromNucleus_normalMaxVec.push_back(0.254870588); distFromNucleus_normalMaxVec.push_back(0.245976471); distFromNucleus_normalMaxVec.push_back(0.209270588); distFromNucleus_normalMaxVec.push_back(0.195529412); distFromNucleus_normalMaxVec.push_back(0.194588235); distFromNucleus_normalMaxVec.push_back(0.215482353);//CellID62 std::vector<double> distFromNucleus_normalMaxVec_apical; distFromNucleus_normalMaxVec_apical.push_back(-9999.9999);//CellID0 distFromNucleus_normalMaxVec_apical.push_back(-9999.9999);//CellID1 distFromNucleus_normalMaxVec_apical.push_back(0.260141176); distFromNucleus_normalMaxVec_apical.push_back(0.255152941); distFromNucleus_normalMaxVec_apical.push_back(0.236658824); distFromNucleus_normalMaxVec_apical.push_back(0.236470588); distFromNucleus_normalMaxVec_apical.push_back(0.242776471); distFromNucleus_normalMaxVec_apical.push_back(0.2408 ); distFromNucleus_normalMaxVec_apical.push_back(0.245835294); distFromNucleus_normalMaxVec_apical.push_back(0.250211765); distFromNucleus_normalMaxVec_apical.push_back(0.257741176); distFromNucleus_normalMaxVec_apical.push_back(0.248423529); distFromNucleus_normalMaxVec_apical.push_back(0.251058824); distFromNucleus_normalMaxVec_apical.push_back(0.247623529); distFromNucleus_normalMaxVec_apical.push_back(0.279058824); distFromNucleus_normalMaxVec_apical.push_back(0.276235294); distFromNucleus_normalMaxVec_apical.push_back(0.261505882); distFromNucleus_normalMaxVec_apical.push_back(0.266117647); distFromNucleus_normalMaxVec_apical.push_back(0.245788235); distFromNucleus_normalMaxVec_apical.push_back(0.249270588); distFromNucleus_normalMaxVec_apical.push_back(0.253082353); distFromNucleus_normalMaxVec_apical.push_back(0.253694118); distFromNucleus_normalMaxVec_apical.push_back(0.277223529); distFromNucleus_normalMaxVec_apical.push_back(0.261270588); distFromNucleus_normalMaxVec_apical.push_back(0.252376471); distFromNucleus_normalMaxVec_apical.push_back(0.259341176); distFromNucleus_normalMaxVec_apical.push_back(0.245223529); distFromNucleus_normalMaxVec_apical.push_back(0.251435294); distFromNucleus_normalMaxVec_apical.push_back(0.262635294); distFromNucleus_normalMaxVec_apical.push_back(0.297552941); distFromNucleus_normalMaxVec_apical.push_back(0.276564706); distFromNucleus_normalMaxVec_apical.push_back(0.283529412); distFromNucleus_normalMaxVec_apical.push_back(0.289552941); distFromNucleus_normalMaxVec_apical.push_back(0.272141176); distFromNucleus_normalMaxVec_apical.push_back(0.2424 ); distFromNucleus_normalMaxVec_apical.push_back(0.243623529); 
distFromNucleus_normalMaxVec_apical.push_back(0.188235294); distFromNucleus_normalMaxVec_apical.push_back(0.257835294); distFromNucleus_normalMaxVec_apical.push_back(0.245223529); distFromNucleus_normalMaxVec_apical.push_back(0.226682353); distFromNucleus_normalMaxVec_apical.push_back(0.208282353); distFromNucleus_normalMaxVec_apical.push_back(0.211388235); distFromNucleus_normalMaxVec_apical.push_back(0.192470588); distFromNucleus_normalMaxVec_apical.push_back(0.1944 ); distFromNucleus_normalMaxVec_apical.push_back(0.194117647); distFromNucleus_normalMaxVec_apical.push_back(0.227623529); distFromNucleus_normalMaxVec_apical.push_back(0.224894118); distFromNucleus_normalMaxVec_apical.push_back(0.242917647); distFromNucleus_normalMaxVec_apical.push_back(0.215152941); distFromNucleus_normalMaxVec_apical.push_back(0.208423529); distFromNucleus_normalMaxVec_apical.push_back(0.208941176); distFromNucleus_normalMaxVec_apical.push_back(0.195717647); distFromNucleus_normalMaxVec_apical.push_back(0.204094118); distFromNucleus_normalMaxVec_apical.push_back(0.183764706); distFromNucleus_normalMaxVec_apical.push_back(0.217223529); distFromNucleus_normalMaxVec_apical.push_back(0.222870588); distFromNucleus_normalMaxVec_apical.push_back(0.226917647); distFromNucleus_normalMaxVec_apical.push_back(0.213223529); distFromNucleus_normalMaxVec_apical.push_back(0.221458824); distFromNucleus_normalMaxVec_apical.push_back(0.207011765); distFromNucleus_normalMaxVec_apical.push_back(0.201035294); distFromNucleus_normalMaxVec_apical.push_back(0.191858824); distFromNucleus_normalMaxVec_apical.push_back(0.191576471);//CellID62 for (int cellRank = 0; cellRank < allocPara_m.maxCellCount; cellRank++){ if (cellRank < allocPara_m.currentActiveCellCount){ // cellInfoVecs.distFromNucleus_normal[cellRank] = distFromNucleus_normalMax;//(distFromNucleus_normalMax) - (distFromNucleus_normalMax - (-14.0))*(1.0 - pow(distFromNucleus_normalVec[cellRank], 3.0)); if (cellRank >= 2 && cellRank <= 21){ cellInfoVecs.distFromNucleus_normal[cellRank] = 1.0*distFromNucleus_normalMaxVec[cellRank]; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 1.0*distFromNucleus_normalMaxVec_apical[cellRank]; if (cellRank == 2){ std::cout<<"cellInfoVecs.distFromNucleus_normal["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal[cellRank]<<std::endl; std::cout<<"cellInfoVecs.distFromNucleus_normal_apical["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal_apical[cellRank]<<std::endl; } } else if (cellRank >= 22 && cellRank <= 42){ cellInfoVecs.distFromNucleus_normal[cellRank] = (1.0)*distFromNucleus_normalMaxVec[cellRank]; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 1.0*distFromNucleus_normalMaxVec_apical[cellRank]; if (cellRank == 22){ std::cout<<"cellInfoVecs.distFromNucleus_normal["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal[cellRank]<<std::endl; std::cout<<"cellInfoVecs.distFromNucleus_normal_apical["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal_apical[cellRank]<<std::endl; } } else if (cellRank >= 43 && cellRank <= 62){ cellInfoVecs.distFromNucleus_normal[cellRank] = 1.0*distFromNucleus_normalMaxVec[cellRank]; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 1.0*distFromNucleus_normalMaxVec_apical[cellRank]; if (cellRank == 43){ std::cout<<"cellInfoVecs.distFromNucleus_normal["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal[cellRank]<<std::endl; std::cout<<"cellInfoVecs.distFromNucleus_normal_apical["<<cellRank<<"] = "<<cellInfoVecs.distFromNucleus_normal_apical[cellRank]<<std::endl; } } 
else{ cellInfoVecs.distFromNucleus_normal[cellRank] = -99999.9; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 99999.9; // std::cout<<"infoVecs.contractActomyo_multip["<<i<<"] = "<<infoVecs.contractActomyo_multip[i]<<std::endl; } } else{ cellInfoVecs.distFromNucleus_normal[cellRank] = -99999.9; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = 99999.9; } } } curTime = curTime + dt; bool tmpIsInitPhase= nodes->isInitPhase ; // std::cout << " *** 3 ***" << endl; if (nodes->isMemNodeTypeAssigned==false) { assignMemNodeType(); // Ali cout << " I assigned boolean values for membrane node types " << endl; nodes->isMemNodeTypeAssigned=true ; } if (nodes->isMemNodeTypeAssigned_postCellDivision==false){ cout<<" I begin to reassign boolean values for membrane node types post-growth"<<endl; assignMemNodeType(); cout<<" I reassigned boolean values for membrane node types post-growth"<<endl; nodes->isMemNodeTypeAssigned_postCellDivision=true; } if (nodes->isMemNodeTypeAssigned_postAddNode==false){ cout<<" I begin to reassign boolean values for membrane node types post-growth"<<endl; assignMemNodeType(); cout<<" I reassigned boolean values for membrane node types post-growth"<<endl; nodes->isMemNodeTypeAssigned_postAddNode=true; } // std::cout << " *** 4 ***" << endl; #ifdef debugModeECM cudaEventRecord(start2, 0); cudaEventSynchronize(start2); cudaEventElapsedTime(&elapsedTime1, start1, start2); #endif double contractileSpringGrowthProgressSpeed = 0.5*9.3405e-5;//9.3405e-5;//For minM14max4 //5.7081e-5;//For minM7max4 //7.2649e-05;//For minM7max7 double cellAreaGrowthProgressSpeed = 0.00022;//0.00011;//0.5*9.984e-5;//9.984e-5;//0.00002088;//0.02*0.000075/(4.0); double cellAreaGrowthProgressNonMitoticSpeed = 0.5*9.984e-5;//9.984e-5;//0.00002088;//0.02*0.000075/(4.0); double distFromBasalLocSpeed = 0.0001;//0.00005; double distFromApicalLocSpeed = 0.0001;//0.00005; if (timeRatio == 0){ std::cout<<"contractileSpringGrowthProgressSpeed = "<<contractileSpringGrowthProgressSpeed<<std::endl; std::cout<<"cellAreaGrowthProgressSpeed = "<<cellAreaGrowthProgressSpeed<<std::endl; std::cout<<"cellAreaGrowthProgressNonMitoticSpeed = "<<cellAreaGrowthProgressNonMitoticSpeed<<std::endl; std::cout<<"Calculating the number of apical node and basal node in each cell"<<std::endl; for (int i = 0; i < allocPara_m.maxCellCount*allocPara_m.maxAllNodePerCell; i++){ int cellRank = i/allocPara_m.maxAllNodePerCell; if (nodes->getInfoVecs().memNodeType1[i] == apical1){ cellInfoVecs.numApicalVec[cellRank] += 1; } if (nodes->getInfoVecs().memNodeType1[i] == basal1){ cellInfoVecs.numBasalVec[cellRank] += 1; } } for (int i = 0; i < allocPara_m.maxCellCount; i++){ cellInfoVecs.cellRankVec[i] = i; std::cout<<"Cell["<<i<<"] has "<<cellInfoVecs.numApicalVec[i]<<" apical nodes and "<<cellInfoVecs.numBasalVec[i]<<" basal nodes"<<std::endl; } } for (int w = 0; w < allocPara_m.currentActiveCellCount; w++){ if (cellInfoVecs.isEnteringMitotic[w] == true){ cellInfoVecs.contractileSpringGrowthProgress[w] += contractileSpringGrowthProgressSpeed;//0.00015; cellInfoVecs.cellAreaGrowthProgress[w] += cellAreaGrowthProgressSpeed; //0.00005; cellInfoVecs.cellAreaGrowthProgressNonMitotic[w] += cellAreaGrowthProgressNonMitoticSpeed;//0.00005; } else{ cellInfoVecs.contractileSpringGrowthProgress[w] = 0.0; cellInfoVecs.cellAreaGrowthProgressNonMitotic[w] += cellAreaGrowthProgressNonMitoticSpeed;//0.00005; cellInfoVecs.cellAreaGrowthProgress[w] += cellAreaGrowthProgressSpeed;//0.00005; cellInfoVecs.distFromBasalLoc[w] += distFromBasalLocSpeed; 
cellInfoVecs.distFromApicalLoc[w] += distFromApicalLocSpeed; // if (w == 86){ // std::cout<<"cellInfoVecs.distFromNucleus_normal[w] : " <<cellInfoVecs.distFromNucleus_normal[w]<<std::endl; // std::cout<<"cellInfoVecs.distFromNucleus_normal_apical[w] : "<<cellInfoVecs.distFromNucleus_normal_apical[w]<<std::endl; // std::cout<<"distFromNucleus_normal[w]*_individualCellHeight[w] : "<<cellInfoVecs.distFromNucleus_normal[w]*cellInfoVecs.individualCellHeight[w]<<std::endl; // std::cout<<"distFromNucleus_normal_apical[w]*_individualCellHeight[w] : "<<cellInfoVecs.distFromNucleus_normal_apical[w]*cellInfoVecs.individualCellHeight[w]<<std::endl; // } } } // std::cout<< " *** 4.5 ***" << std::endl; int nucRepopuRate = 5000;//2000; // if (timeRatio > timeRatio_Crit_Division && relaxCount % nucRepopuRate == 0){ if (relaxCount % nucRepopuRate == 0){ // if (relaxCount == 100){ // std::cout<<"Nucleus are being repopuluated every "<<nucRepopuRate<<" time stpes"<<std::endl; // } uint targetCellRank; for (int i = 0; i < allocPara_m.currentActiveCellCount; i++){ if (cellInfoVecs.isEnteringMitotic[i] == false){ continue; } targetCellRank = i; if (targetCellRank < 2){ continue; } else if (targetCellRank > 62 && targetCellRank < 86){ continue; } else if (cellInfoVecs.activeIntnlNodeCounts[targetCellRank] >= allocPara_m.maxIntnlNodePerCell){ continue; } int startIndex = targetCellRank*allocPara_m.maxAllNodePerCell + allocPara_m.maxMembrNodePerCell; int currentNumOfNucleus = 0; for (int j = startIndex; j < (targetCellRank+1)*allocPara_m.maxAllNodePerCell; j++){ if (nodes->getInfoVecs().nodeIsActive[j] == true){ currentNumOfNucleus+=1; } } int numOfMissingNulceus = allocPara_m.maxIntnlNodePerCell - currentNumOfNucleus; // std::cout<<"Currently missing "<<numOfMissingNulceus<<" nucleus particle in cell "<<targetCellRank<<std::endl; double total_nucNodeLocX = 0.0; double total_nucNodeLocY = 0.0; for (int k = 0; k < currentNumOfNucleus; k++){ total_nucNodeLocX += nodes->getInfoVecs().nodeLocX[startIndex + k]; total_nucNodeLocY += nodes->getInfoVecs().nodeLocY[startIndex + k]; } total_nucNodeLocX = total_nucNodeLocX/currentNumOfNucleus; total_nucNodeLocY = total_nucNodeLocY/currentNumOfNucleus; nodes->getInfoVecs().nodeLocX[startIndex + currentNumOfNucleus] = total_nucNodeLocX; nodes->getInfoVecs().nodeLocY[startIndex + currentNumOfNucleus] = total_nucNodeLocY; nodes->getInfoVecs().nodeIsActive[startIndex + currentNumOfNucleus] = true; // if (numOfMissingNulceus != 0){ // for (int k = 0; k < numOfMissingNulceus; k++){ // nodes->getInfoVecs().nodeLocX[startIndex + currentNumOfNucleus + k] = // nodes->getInfoVecs().nodeLocX[startIndex + k] + 0.01; // nodes->getInfoVecs().nodeLocY[startIndex + currentNumOfNucleus + k] = // nodes->getInfoVecs().nodeLocY[startIndex + k] + 0.01; // nodes->getInfoVecs().nodeIsActive[startIndex + currentNumOfNucleus + k] = true; // } // } currentNumOfNucleus = 0; for (int j = startIndex; j < (targetCellRank+1)*allocPara_m.maxAllNodePerCell; j++){ if (nodes->getInfoVecs().nodeIsActive[j] == true){ currentNumOfNucleus+=1; } cellInfoVecs.activeIntnlNodeCounts[targetCellRank] = currentNumOfNucleus; } // std::cout<<"number of nucleus nodes in cell["<<targetCellRank<<"] = "<<currentNumOfNucleus<<" after repopulation"<<std::endl; // std::cout<<"CellRank = "<<targetCellRank<<"new activeIntnlNodeCounts = "<<cellInfoVecs.activeIntnlNodeCounts[targetCellRank]<<std::endl; } } // std::cout << " *** 4.75 ***" << endl; // computeApicalLoc(timeRatio, timeRatio_Crit_Division); //Ali // ////// // // std::cout 
<< " *** 5 ***" << endl; // computeBasalLoc(); //Ali // std::cout << " *** 6 ***" << endl; // for (int i = 0; i < allocPara_m.currentActiveCellCount; i++){ // cellInfoVecs.individualCellHeight[i] = sqrt((cellInfoVecs.apicalLocX[i] - cellInfoVecs.basalLocX[i])*(cellInfoVecs.apicalLocX[i] - cellInfoVecs.basalLocX[i]) + // (cellInfoVecs.apicalLocY[i] - cellInfoVecs.basalLocY[i])*(cellInfoVecs.apicalLocY[i] - cellInfoVecs.basalLocY[i])); // cellInfoVecs.individualCellHeight_springHeight[i] = cellInfoVecs.individualCellHeight[i]*0.3; // //Note: what is calculated here really? Well, we want to extend a distance away from the cell center (-ish) position to determine how far // // contractile spring will be placed. We now assume that the upper and lower 20% of the cell height is covered by contractile spring, // // therefore, leading to the non-spring portion taking 60% (hence 30% above center and 30% below center). // cellInfoVecs.distFromNucleus_normal[i] = -1.0*cellInfoVecs.individualCellHeight_springHeight[i]; // cellInfoVecs.distFromNucleus_normal_apical[i] = 1.0*cellInfoVecs.individualCellHeight_springHeight[i]; // } computeApicalLoc(timeRatio, timeRatio_Crit_Division); computeBasalLoc(); uint recalculateCellHeight = 4; if (timeRatio == 0){// || relaxCount%recalculateCellHeight == 0){ computeIndividualCellHeight(distFromNucleus_normalMax1,distFromNucleus_normalMax2,distFromNucleus_normalMax3, distFromNucleus_normalMax_apical1,distFromNucleus_normalMax_apical2,distFromNucleus_normalMax_apical3);//This function does the above commented out computation. // computeIndividualCellHeight_Ver2();//Ver2 does cell height calculation only, not manipulating the number of contractile springs. } else if (relaxCount%recalculateCellHeight == 0){ computeIndividualCellHeight_Ver2();//Ver2 does cell height calculation only, not manipulating the number of contractile springs. 
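// Clarifying note (the 0.3 spring-band fraction is taken from the commented-out variant above;
// whether _Ver2 applies the same factor internally is an assumption): on this branch the
// apical-basal height is only refreshed every recalculateCellHeight (= 4) relaxation steps,
// e.g. a recomputed individualCellHeight of 10.0 would later place the contractile springs
// roughly 0.3 * 10.0 = 3.0 above and below the cell center while the spring count stays fixed.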
} #ifdef debugModeECM cudaEventRecord(start3, 0); cudaEventSynchronize(start3); cudaEventElapsedTime(&elapsedTime2, start2, start3); #endif // eCMCellInteraction(cellPolar,subCellPolar,tmpIsInitPhase, timeRatio, timeRatio_Crit_ECM, timeRatio_Crit_Division, relaxCount); eCMCellInteraction(cellPolar,subCellPolar,tmpIsInitPhase, timeRatio, timeRatio_Crit_ECM, timeRatio_Crit_Division, relaxCount, mitoticThreshold); if (cellInfoVecs.isOneTimeStepPostDivision != false){ std::cout<<"Condition ' cellInfoVecs.isOneTimeStepPostDivision != false ' is satisfied"<<std::endl; std::cout<<"cellInfoVecs.isOneTimeStepPostDivision = "<<cellInfoVecs.isOneTimeStepPostDivision<<std::endl; std::cout<<"cellInfoVecs.isPostDivision = "<<cellInfoVecs.isPostDivision<<std::endl; /*for (int kk = 0; kk < cellInfoVecs.eCMNeighborId.size(); kk++){ std::cout<<"eCMNeighborId["<<kk<<"] after division= "<<cellInfoVecs.eCMNeighborId[kk]<<std::endl; }*/ } if (cellInfoVecs.isPostDivision == true){ cellInfoVecs.isOneTimeStepPostDivision = true; cellInfoVecs.isPostDivision = false; std::cout<<"Condition ' cellInfoVecs.isPostDivision == true ' is satisfied"<<std::endl; std::cout<<"cellInfoVecs.isOneTimeStepPostDivision = "<<cellInfoVecs.isOneTimeStepPostDivision<<std::endl; std::cout<<"cellInfoVecs.isPostDivision = "<<cellInfoVecs.isPostDivision<<std::endl; } else if (cellInfoVecs.isPostDivision == false && cellInfoVecs.isOneTimeStepPostDivision == true){ cellInfoVecs.isTwoTimeStepPostDivision = false;//true; cellInfoVecs.isOneTimeStepPostDivision = false; } else{ cellInfoVecs.isOneTimeStepPostDivision = false; cellInfoVecs.isTwoTimeStepPostDivision = false; } if (cellInfoVecs.isPostAddMembrNodes == true){ cellInfoVecs.isPostAddMembrNodes = false; } // if (timeRatio > timeRatio_Crit_Division && nodes->isECMPairPrinted==false){ // if (timeRatio == timeRatio_Crit_Division){ // // std::cout<<"bdrynodecount = "<<allocPara_m.bdryNodeCount<<std::endl; // // for (int kk = 0; kk < cellInfoVecs.basalLocX.size(); kk++){ // // // std::cout<<"BasalLocX["<<kk<<"] = "<< cellInfoVecs.basalLocX[kk]<<std::endl; // // // std::cout<<"BasalLocY["<<kk<<"] = "<< cellInfoVecs.basalLocY[kk]<<std::endl; // // } // for (int kk = 0; kk < (allocPara_m.currentActiveCellCount*allocPara_m.maxAllNodePerCell); kk++){ // if (kk/allocPara_m.maxAllNodePerCell ==86 || kk/allocPara_m.maxAllNodePerCell == 31 || kk/allocPara_m.maxAllNodePerCell == 32 ||kk/allocPara_m.maxAllNodePerCell == 30){ // } // else{ // continue; // } // } // for (int kk = 0; kk < cellInfoVecs.eCMNeighborId.size(); kk++){ // std::cout<<"eCMNeighborId["<<kk<<"] = "<<cellInfoVecs.eCMNeighborId[kk]<<std::endl; // } // nodes->isECMPairPrinted=true; // } // std::cout << " *** 7 ***" << endl; #ifdef debugModeECM cudaEventRecord(start4, 0); cudaEventSynchronize(start4); cudaEventElapsedTime(&elapsedTime3, start3, start4); #endif computeCenterPos_M2(); //Ali // std::cout << " *** 8 ***" << endl; computeInternalAvgPos_M(); //Ali // right now internal points represent nucleus // std::cout << " *** 9 ***" << endl; //computeNucleusLoc() ; #ifdef debugModeECM cudaEventRecord(start5, 0); cudaEventSynchronize(start5); cudaEventElapsedTime(&elapsedTime4, start4, start5); #endif if (isInitNucPercentCalculated==false && controlPara.resumeSimulation==0) { computeNucleusIniLocPercent(); //Ali writeNucleusIniLocPercent(); //Ali isInitNucPercentCalculated=true ; cout << " I computed initial location of nucleus positions in percent" << endl; } else if (isInitNucPercentCalculated==false && 
controlPara.resumeSimulation==1){ readNucleusIniLocPercent(); //Ali isInitNucPercentCalculated=true ; for (int j = 0; j < cellInfoVecs.nucleusLocPercent.size(); j++){ std::cout<<"nucleusLocPercent["<<j<<"] = "<<cellInfoVecs.nucleusLocPercent[j]<<std::endl; } cout << " I read initial location of nucleus positions in percent, since I am in resume mode" << endl; } // std::cout << " *** 10 ***" << endl; // if (timeRatio == 0){ computeNucleusDesireLoc() ; // Ali // } // if (timeRatio == timeRatio_Crit_Division || timeRatio == timeRatio_Crit_Division+0.2){ // std::cout<<"timeRatio = "<<timeRatio<<std::endl; // std::cout<<"cellInfoVecs.nucDesireDistApical[31] = "<<cellInfoVecs.nucDesireDistApical[31]<<std::endl; // std::cout<<"cellInfoVecs.centerCoordX[31] = "<<cellInfoVecs.centerCoordX[31]<<std::endl; // std::cout<<"cellInfoVecs.centerCoordY[31] = "<<cellInfoVecs.centerCoordY[31]<<std::endl; // std::cout<<"cellInfoVecs.apicalLocX[31] = "<<cellInfoVecs.apicalLocX[31]<<std::endl; // std::cout<<"cellInfoVecs.apicalLocY[31] = "<<cellInfoVecs.apicalLocY[31]<<std::endl; // std::cout<<"cellInfoVecs.nucDesireDistApical[87] = "<<cellInfoVecs.nucDesireDistApical[87]<<std::endl; // std::cout<<"cellInfoVecs.centerCoordX[87] = "<<cellInfoVecs.centerCoordX[87]<<std::endl; // std::cout<<"cellInfoVecs.centerCoordY[87] = "<<cellInfoVecs.centerCoordY[87]<<std::endl; // std::cout<<"cellInfoVecs.apicalLocX[87] = "<<cellInfoVecs.apicalLocX[87]<<std::endl; // std::cout<<"cellInfoVecs.apicalLocY[87] = "<<cellInfoVecs.apicalLocY[87]<<std::endl; // } // std::cout << " *** 11 ***" << endl; #ifdef debugModeECM cudaEventRecord(start6, 0); cudaEventSynchronize(start6); cudaEventElapsedTime(&elapsedTime5, start5, start6); #endif // if (tmpIsInitPhase==false) { // updateInternalAvgPosByNucleusLoc_M (); // } //PlotNucleus (lastPrintNucleus, outputFrameNucleus) ; //BC_Imp_M() ; //Ali applySceCellDisc_M(); // std::cout << " *** 12 ***" << endl; #ifdef debugModeECM cudaEventRecord(start7, 0); cudaEventSynchronize(start7); cudaEventElapsedTime(&elapsedTime6, start6, start7); #endif if (isBasalActinPresent) { // cout << " I am applying basal contraction" << endl ; // applyMembContraction(timeRatio, timeRatio_Crit_actomyo, timeRatio_Crit_Division, distFromNucleus_max, distFromNucleus_min, percentage_before_timeRatio_Crit_Division_scaling) ; // Ali applyMembContraction2(timeRatio, timeRatio_Crit_actomyo, timeRatio_Crit_Division, distFromNucleus_max, distFromNucleus_min,mitoRndActomyoStrengthScaling);// percentage_before_timeRatio_Crit_Division_scaling) ; // Kevin } // std::cout << " *** 13 ***" << endl; #ifdef debugModeECM cudaEventRecord(start8, 0); cudaEventSynchronize(start8); cudaEventElapsedTime(&elapsedTime7, start7, start8); #endif // applyNucleusEffect() ; // applyForceInteractionNucleusAsPoint() ; applyMemForce_M(cellPolar,subCellPolar); // if (timeRatio > timeRatio_Crit_Division && nodes->isActinLevelDisplayed == false){ // for (int w = 0; w < nodes->getInfoVecs().nodeActinLevel.size(); w++){ // if (w/allocPara_m.maxAllNodePerCell == 31 || w/allocPara_m.maxAllNodePerCell == 86){ // std::cout<<"actin level["<<w/allocPara_m.maxAllNodePerCell<<"] = "<<nodes->getInfoVecs().nodeActinLevel[w]<<std::endl; // } // } // nodes->isActinLevelDisplayed = true; // } // std::cout << " *** 14 ***" << endl; #ifdef debugModeECM cudaEventRecord(start9, 0); cudaEventSynchronize(start9); cudaEventElapsedTime(&elapsedTime8, start8, start9); #endif applyVolumeConstraint(timeRatio, timeRatio_Crit_Division, volume_Increase_Target_Ratio, 
volume_Increase_Scale, postDivision_restorationRateScale, cycle); //Ali // std::cout << " *** 15 ***" << endl; if (timeRatio > timeRatio_Crit_Division && nodes->isActinLevelDisplayed == false){ for (int kk = 0; kk < cellInfoVecs.cellAreaVec.size(); kk++){ // if (kk == 31 || kk == 86){ // std::cout<<"Cell["<<kk<<"] area = "<<cellInfoVecs.cellAreaVec[kk]<<std::endl; // } // for (int kk = 0; kk < allocPara_m.maxCellCount; kk++){ // double cellRank = kk; // uint intnlIndxMemBegin = cellRank * allocPara_m.maxAllNodePerCell; // uint intnlIndxBegin = cellRank * allocPara_m.maxAllNodePerCell + allocPara_m.maxMembrNodePerCell; // uint intnlIndxEnd = intnlIndxBegin + cellInfoVecs.activeIntnlNodeCounts[kk]; // std::cout<<"cellRank = "<<cellRank<<", intnlIndxMemBegin = "<<intnlIndxMemBegin<<", intnlIndxBegin = "<<intnlIndxBegin<<", intnlIndxEnd = "<<intnlIndxEnd<<std::endl; // } } // for (int kk = 0; kk < cellInfoVecs.Cell_Damp.size(); kk++){ // std::cout<<"Node Damping for cell ["<<kk<<"] = "<<cellInfoVecs.Cell_Damp[kk]<<std::endl; // } nodes->isActinLevelDisplayed=true; } #ifdef debugModeECM cudaEventRecord(start10, 0); cudaEventSynchronize(start10); cudaEventElapsedTime(&elapsedTime9, start9, start10); #endif //ApplyExtForces() ; // now for single cell stretching //computeContractileRingForces() ; // computeCenterPos_M(); //Ali cmment // // if (isCellGrowSet==false) { // growAtRandom_M(dt); // growAtRandom_M(growthProgressSpeed); growAtRandom_M_Ver2(growthProgressSpeed, mitoticThreshold); // cout << "I set the growth level. Since the cells are not growing a divising for this simulation I won't go inside this function any more" << endl ; // isCellGrowSet=true ; // } // std::cout << " *** 16 ***" << endl; relaxCount=relaxCount+1 ; // std::cout << " *** 17 ***" << endl; distributeCellGrowthProgress_M(); // std::cout << " *** 18 ***" << endl; findTangentAndNormal_M();//AAMIRI ADDED May29 // std::cout << " *** 19 ***" << endl; StoreNodeOldPositions() ; // std::cout << " *** 20 ***" << endl; #ifdef debugModeECM cudaEventRecord(start11, 0); cudaEventSynchronize(start11); cudaEventElapsedTime(&elapsedTime10, start10, start11); #endif allComponentsMove_M(); // std::cout << " *** 21 ***" << endl; int checkForMitosisAndDivision = 250; int checkForOverextension = 500; bool membrAddingNode = false; // if (1 < 0){ if (relaxCount % checkForMitosisAndDivision == 0){ if (allocPara_m.currentActiveCellCount < allocPara_m.maxCellCount){ // std::cout<<"contractileSpringGrowthProgress[10] = "<<cellInfoVecs.contractileSpringGrowthProgress[10]<<std::endl; enterMitoticCheckForDivAxisCal(mitoticThreshold) ; divide2D_M(volume_Increase_Target_Ratio, timeRatio, thresholdToIntroduceNewCell); // std::cout<<" *** 4.25 *** "<< std::endl; if (relaxCount % checkForOverextension == 0){ updateMembrGrowthProgress_M(); handleMembrGrowth_M(maxApicalBasalNodeNum, maxLengthToAddMemNodes); } nodes->adhUpdate=true; } } #ifdef debugModeECM cudaEventRecord(start12, 0); cudaEventSynchronize(start12); cudaEventElapsedTime(&elapsedTime11, start11, start12); #endif //allComponentsMoveImplicitPart() ; // if (relaxCount%checkForMitosisAndDivision==0) { // updateMembrGrowthProgress_M(); // handleMembrGrowth_M(); // // std::cout << " *** 10 ***" << endl; // std::cout.flush(); // // relaxCount=0 ; // Ali // nodes->adhUpdate=true; // Ali // } # ifdef debugModeECM cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime12, start12, stop); std::cout << "time 1 spent in cell module for moving the membrane node of cells and 
ECM nodes are: " << elapsedTime1 << endl ; std::cout << "time 2 spent in cell for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ; std::cout << "time 3 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ; std::cout << "time 4 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime4 << endl ; std::cout << "time 5 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime5 << endl ; std::cout << "time 6 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime6 << endl ; std::cout << "time 7 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime7 << endl ; std::cout << "time 8 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime8 << endl ; std::cout << "time 9 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime9 << endl ; std::cout << "time 10 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime10 << endl ; std::cout << "time 11 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime11 << endl ; std::cout << "time 12 spent in cell module for moving the membrane node of cells and ECM nodes are: " << elapsedTime12 << endl ; #endif // std::cout << " *** 22 ***" << endl; } void SceCells::runStretchTest(double dt) { this->dt = dt; computeCenterPos(); growAlongX(false, dt); moveNodes(); } void SceCells::growAlongX(bool isAddPt, double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; setGrowthDirXAxis(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); if (isAddPt) { addPointIfScheduledToGrow(); } } void SceCells::growWithStress(double d_t) { } std::vector<CVector> SceCells::getAllCellCenters() { thrust::host_vector<double> centerX = cellInfoVecs.centerCoordX; thrust::host_vector<double> centerY = cellInfoVecs.centerCoordY; thrust::host_vector<double> centerZ = cellInfoVecs.centerCoordZ; std::vector<CVector> result; for (uint i = 0; i < allocPara.currentActiveCellCount; i++) { CVector pos = CVector(centerX[i], centerY[i], centerZ[i]); result.push_back(pos); } return result; } void SceCells::setGrowthDirXAxis() { thrust::fill(cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthXDir.begin() + allocPara.currentActiveCellCount, 1.0); thrust::fill(cellInfoVecs.growthYDir.begin(), cellInfoVecs.growthYDir.begin() + allocPara.currentActiveCellCount, 0.0); thrust::fill(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, growthAuxData.fixedGrowthSpeed); } std::vector<double> SceCells::getGrowthProgressVec() { thrust::host_vector<double> growthProVec = cellInfoVecs.growthProgress; std::vector<double> result; for (uint i = 0; i < allocPara.currentActiveCellCount; i++) { 
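		// growthProVec is a thrust::host_vector copy of the device-side growthProgress vector,
		// so this loop reads host memory; indexing the device_vector element by element here
		// would instead trigger one device-to-host transfer per access.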
result.push_back(growthProVec[i]); } return result; } void SceCells::copyCellsPreDivision_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara_m.maxAllNodePerCell; divAuxData.tmpApicalLoc = thrust::device_vector<double>(2,0.0); divAuxData.tmpBasalLoc = thrust::device_vector<double>(2,0.0); divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( divAuxData.nodeStorageCount, notAssigned1); //Ali divAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( divAuxData.nodeStorageCount, -1); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpIntAvgX_M = thrust::device_vector<double>( //Ali divAuxData.toBeDivideCount, 0); divAuxData.tmpIntAvgY_M = thrust::device_vector<double>( //Ali divAuxData.toBeDivideCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodeType1 = thrust::device_vector<MembraneType1>( divAuxData.nodeStorageCount, notAssigned1); //Ali divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodeType2 = thrust::device_vector<MembraneType1>( divAuxData.nodeStorageCount, notAssigned1); //Ali //A&A divAuxData.tmpHertwigXdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpHertwigYdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); //A&A // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + 
allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin(), divAuxData.tmpNodeType.begin(), divAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpHertwigXdir.begin(), divAuxData.tmpHertwigYdir.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin(), divAuxData.tmpIntAvgX_M.begin(), divAuxData.tmpIntAvgY_M.begin() )), isTrue()); for (int w = 0; w < cellInfoVecs.isDividing.size(); w++){ if (cellInfoVecs.isDividing[w] == true){ divAuxData.tmpApicalLoc[0] = cellInfoVecs.apicalLocX[w]; divAuxData.tmpApicalLoc[1] = cellInfoVecs.apicalLocY[w]; divAuxData.tmpBasalLoc[0] = cellInfoVecs.basalLocX[w]; divAuxData.tmpBasalLoc[1] = cellInfoVecs.basalLocY[w]; } } } void SceCells::copyCellsForPrintingOnly_M() { uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; printAuxData.nodeStorageCount = printAuxData.toBeDivideCount * allocPara_m.maxAllNodePerCell; // std::cout<<"copy 1"<<std::endl; printAuxData.tmpApicalLoc = thrust::device_vector<double>(2,0.0); printAuxData.tmpBasalLoc = thrust::device_vector<double>(2,0.0); // std::cout<<"copy 2"<<std::endl; printAuxData.tmpIsActive_M = thrust::device_vector<bool>( printAuxData.nodeStorageCount, true); printAuxData.tmpNodePosX_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpNodePosY_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( printAuxData.nodeStorageCount, notAssigned1); //Ali printAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( printAuxData.nodeStorageCount, -1); // std::cout<<"copy 3"<<std::endl; printAuxData.tmpCellRank_M = thrust::device_vector<uint>( printAuxData.toBeDivideCount, 0); printAuxData.tmpDivDirX_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpDivDirY_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpCenterPosX_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpCenterPosY_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); 
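	// Sizing convention for these printAuxData temporaries: per-node buffers (tmpIsActive_M,
	// tmpNodePos*_M, tmpNodeType, ...) have length nodeStorageCount = toBeDivideCount * maxAllNodePerCell,
	// while per-cell buffers (tmpCellRank_M, tmpDivDir*_M, tmpCenterPos*_M, ...) hold one entry per
	// dividing cell. The copy_if below then uses a permutation_iterator over isDividing, expanded to
	// one flag per node via DivideFunctor(maxAllNodePerCell), as the stencil, so that only the nodes
	// belonging to dividing cells are gathered into these buffers.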
printAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); printAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( printAuxData.toBeDivideCount, 0); // std::cout<<"copy 4"<<std::endl; printAuxData.tmpIntAvgX_M = thrust::device_vector<double>( //Ali printAuxData.toBeDivideCount, 0); printAuxData.tmpIntAvgY_M = thrust::device_vector<double>( //Ali printAuxData.toBeDivideCount, 0); // std::cout<<"copy 5"<<std::endl; printAuxData.tmpIsActive1_M = thrust::device_vector<bool>( printAuxData.nodeStorageCount, false); printAuxData.tmpXPos1_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpYPos1_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpNodeType1 = thrust::device_vector<MembraneType1>( printAuxData.nodeStorageCount, notAssigned1); //Ali // std::cout<<"copy 6"<<std::endl; printAuxData.tmpIsActive2_M = thrust::device_vector<bool>( printAuxData.nodeStorageCount, false); printAuxData.tmpXPos2_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpYPos2_M = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpNodeType2 = thrust::device_vector<MembraneType1>( printAuxData.nodeStorageCount, notAssigned1); //Ali // std::cout<<"copy 7"<<std::endl; //A&A printAuxData.tmpHertwigXdir = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); printAuxData.tmpHertwigYdir = thrust::device_vector<double>( printAuxData.nodeStorageCount, 0.0); //A&A // std::cout<<"copy 8"<<std::endl; // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin())), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(printAuxData.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(printAuxData.tmpIsActive_M.begin(), printAuxData.tmpNodePosX_M.begin(), printAuxData.tmpNodePosY_M.begin(), printAuxData.tmpNodeType.begin(), printAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // std::cout<<"copy 9"<<std::endl; // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), 
cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple(printAuxData.tmpCellRank_M.begin(), printAuxData.tmpDivDirX_M.begin(), printAuxData.tmpDivDirY_M.begin(), printAuxData.tmpHertwigXdir.begin(), printAuxData.tmpHertwigYdir.begin(), printAuxData.tmpCenterPosX_M.begin(), printAuxData.tmpCenterPosY_M.begin(), printAuxData.tmpIntAvgX_M.begin(), printAuxData.tmpIntAvgY_M.begin() )), isTrue()); // std::cout<<"copy 10"<<std::endl; // for (int w = 0; w < printAuxData.isDividing.size(); w++){ // if (printAuxData.isDividing[w] == true){ // printAuxData.tmpApicalLoc[0] = cellInfoVecs.apicalLocX[w]; // printAuxData.tmpApicalLoc[1] = cellInfoVecs.apicalLocY[w]; // printAuxData.tmpBasalLoc[0] = cellInfoVecs.basalLocX[w]; // printAuxData.tmpBasalLoc[1] = cellInfoVecs.basalLocY[w]; // } // } } // void SceCells::copyCellsEnterMitotic() { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // divAuxData.nodeStorageCount = divAuxData.toEnterMitoticCount // * allocPara_m.maxAllNodePerCell; // std::cout<<"nodeStorageCount = "<<divAuxData.nodeStorageCount<<std::endl; // divAuxData.tmpIsActive_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, true); // divAuxData.tmpNodePosX_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpNodePosY_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( // divAuxData.nodeStorageCount, notAssigned1); //Ali // divAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( // divAuxData.nodeStorageCount, -1); // divAuxData.tmpCellRank_M = thrust::device_vector<uint>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpDivDirX_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpDivDirY_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( // divAuxData.toEnterMitoticCount, 0); // divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, false); // divAuxData.tmpXPos1_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpYPos1_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, false); // divAuxData.tmpXPos2_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpYPos2_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // // step 2 , continued // copy node info values ready for division /comment A&A // thrust::counting_iterator<uint> iStart(0); // // thrust::copy_if( // // thrust::make_zip_iterator( // // thrust::make_tuple( // // nodes->getInfoVecs().nodeIsActive.begin() // // + allocPara_m.bdryNodeCount, // // 
nodes->getInfoVecs().nodeLocX.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocY.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().memNodeType1.begin() // // + allocPara_m.bdryNodeCount)), // // thrust::make_zip_iterator( // // thrust::make_tuple( // // nodes->getInfoVecs().nodeIsActive.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocX.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocY.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().memNodeType1.begin() // // + allocPara_m.bdryNodeCount)) // // + totalNodeCountForActiveCells, // // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // // make_transform_iterator(iStart, // // DivideFunctor(allocPara_m.maxAllNodePerCell))), // // thrust::make_zip_iterator( // // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // // divAuxData.tmpNodePosX_M.begin(), // // divAuxData.tmpNodePosY_M.begin(), // // divAuxData.tmpNodeType.begin())), isTrue()); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeMemMirrorIndex.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeMemMirrorIndex.begin() // + allocPara_m.bdryNodeCount)) // + totalNodeCountForActiveCells, // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // make_transform_iterator(iStart, // DivideFunctor(allocPara_m.maxAllNodePerCell))), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // divAuxData.tmpNodePosX_M.begin(), // divAuxData.tmpNodePosY_M.begin(), // divAuxData.tmpNodeType.begin(), // divAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // // step 3 , continued for cell properties //copy cell info values ready for division /comment A&A // thrust::counting_iterator<uint> iBegin(0); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), // cellInfoVecs.growthYDir.begin(), // cellInfoVecs.centerCoordX.begin(), // cellInfoVecs.centerCoordY.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), // cellInfoVecs.growthYDir.begin(), // cellInfoVecs.centerCoordX.begin(), // cellInfoVecs.centerCoordY.begin())) // + allocPara_m.currentActiveCellCount, // cellInfoVecs.isEnteringMitotic.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), // divAuxData.tmpDivDirX_M.begin(), // divAuxData.tmpDivDirY_M.begin(), // divAuxData.tmpCenterPosX_M.begin(), // divAuxData.tmpCenterPosY_M.begin())), isTrue()); // } // void SceCells::copyCellsEnterDivision() { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // divAuxData.nodeStorageCount = divAuxData.toBeDivideCount // * allocPara_m.maxAllNodePerCell; // 
std::cout<<"nodeStorageCount = "<<divAuxData.nodeStorageCount<<std::endl; // divAuxData.tmpIsActive_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, true); // divAuxData.tmpNodePosX_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpNodePosY_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( // divAuxData.nodeStorageCount, notAssigned1); //Ali // divAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( // divAuxData.nodeStorageCount, -1); // divAuxData.tmpCellRank_M = thrust::device_vector<uint>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpDivDirX_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpDivDirY_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( // divAuxData.toBeDivideCount, 0); // divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, false); // divAuxData.tmpXPos1_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpYPos1_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( // divAuxData.nodeStorageCount, false); // divAuxData.tmpXPos2_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // divAuxData.tmpYPos2_M = thrust::device_vector<double>( // divAuxData.nodeStorageCount, 0.0); // // step 2 , continued // copy node info values ready for division /comment A&A // thrust::counting_iterator<uint> iStart(0); // // thrust::copy_if( // // thrust::make_zip_iterator( // // thrust::make_tuple( // // nodes->getInfoVecs().nodeIsActive.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocX.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocY.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().memNodeType1.begin() // // + allocPara_m.bdryNodeCount)), // // thrust::make_zip_iterator( // // thrust::make_tuple( // // nodes->getInfoVecs().nodeIsActive.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocX.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().nodeLocY.begin() // // + allocPara_m.bdryNodeCount, // // nodes->getInfoVecs().memNodeType1.begin() // // + allocPara_m.bdryNodeCount)) // // + totalNodeCountForActiveCells, // // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // // make_transform_iterator(iStart, // // DivideFunctor(allocPara_m.maxAllNodePerCell))), // // thrust::make_zip_iterator( // // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // // divAuxData.tmpNodePosX_M.begin(), // // divAuxData.tmpNodePosY_M.begin(), // // divAuxData.tmpNodeType.begin())), isTrue()); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // 
nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeMemMirrorIndex.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeMemMirrorIndex.begin() // + allocPara_m.bdryNodeCount)) // + totalNodeCountForActiveCells, // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // make_transform_iterator(iStart, // DivideFunctor(allocPara_m.maxAllNodePerCell))), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // divAuxData.tmpNodePosX_M.begin(), // divAuxData.tmpNodePosY_M.begin(), // divAuxData.tmpNodeType.begin(), // divAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // // step 3 , continued for cell properties //copy cell info values ready for division /comment A&A // thrust::counting_iterator<uint> iBegin(0); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), // cellInfoVecs.growthYDir.begin(), // cellInfoVecs.centerCoordX.begin(), // cellInfoVecs.centerCoordY.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), // cellInfoVecs.growthYDir.begin(), // cellInfoVecs.centerCoordX.begin(), // cellInfoVecs.centerCoordY.begin())) // + allocPara_m.currentActiveCellCount, // cellInfoVecs.isEnteringMitotic.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), // divAuxData.tmpDivDirX_M.begin(), // divAuxData.tmpDivDirY_M.begin(), // divAuxData.tmpCenterPosX_M.begin(), // divAuxData.tmpCenterPosY_M.begin())), isTrue()); // } void SceCells::copyCellsEnterDivision() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara_m.maxAllNodePerCell; std::cout<<"nodeStorageCount = "<<divAuxData.nodeStorageCount<<std::endl; divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodeType = thrust::device_vector<MembraneType1>( divAuxData.nodeStorageCount, notAssigned1); //Ali divAuxData.tmpNodeMemMirrorIndex_M = thrust::device_vector<int>( divAuxData.nodeStorageCount, -1); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpNucleusCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpNucleusCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( 
divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount)), // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().memNodeType1.begin() // + allocPara_m.bdryNodeCount)) // + totalNodeCountForActiveCells, // thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), // make_transform_iterator(iStart, // DivideFunctor(allocPara_m.maxAllNodePerCell))), // thrust::make_zip_iterator( // thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), // divAuxData.tmpNodePosX_M.begin(), // divAuxData.tmpNodePosY_M.begin(), // divAuxData.tmpNodeType.begin())), isTrue()); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin())), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().memNodeType1.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeMemMirrorIndex.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin(), divAuxData.tmpNodeType.begin(), divAuxData.tmpNodeMemMirrorIndex_M.begin())), isTrue()); // step 3 , continued for cell properties //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( 
thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin())), isTrue()); } void SceCells::createTwoNewCellArr_M() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); divAuxData.isMotherCellBehind.clear(); //Ali //divDebug(); for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { divAuxData.tmp1IntnlVec.clear(); divAuxData.tmp2IntnlVec.clear(); vector<CVector> membrNodes; vector<CVector> intnlNodes; vector<MembraneType1> nodeTypeIndxDiv ; vector<CVector> cellCenterLine_Basal2Apical; vector<CVector> cellCenterLine_Apical2Basal; vector<CVector> cellCenterLine_Basal2Apical_leftShift; vector<CVector> cellCenterLine_Basal2Apical_rightShift; vector<CVector> cellCenterLine_Apical2Basal_leftShift; vector<CVector> cellCenterLine_Apical2Basal_rightShift; vector<double> cellCenterLine_MirrorLength_Basal2Apical; vector<double> cellCenterLine_MirrorLength_Apical2Basal; //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // std::cout<<"HAHA ERROR 1"<<std::endl; // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes,nodeTypeIndxDiv); // Ali obtainMembrAndIntnlNodesPlusNodeType2(i, membrNodes, intnlNodes, nodeTypeIndxDiv, cellCenterLine_Basal2Apical, cellCenterLine_Apical2Basal, cellCenterLine_Basal2Apical_leftShift, cellCenterLine_Basal2Apical_rightShift, cellCenterLine_Apical2Basal_leftShift, cellCenterLine_Apical2Basal_rightShift, cellCenterLine_MirrorLength_Basal2Apical, cellCenterLine_MirrorLength_Apical2Basal); // std::cout<<"HAHA ERROR 2"<<std::endl; CVector oldCellCenter = obtainCellCenter(i); // std::cout<<"oldCellCenter = "<<oldCellCenter.x<<" "<<oldCellCenter.y<<std::endl; // std::cout<<"HAHA ERROR 3"<<std::endl; CVector oldNucleusCenter = obtainNucleusCenter(i, intnlNodes);// Kevin // std::cout<<"HAHA ERROR 4"<<std::endl; // CVector oldIntCenter = obtainIntCenter(i); //A&A commented //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); /*CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis);*/ CVector divDir, true_divDir; // divDir.x = divAuxData.tmpHertwigXdir[i] ; //A&A // divDir.y = divAuxData.tmpHertwigYdir[i] ; //A&A divDir.x = divAuxData.tmpBasalLoc[0] - oldNucleusCenter.x; divDir.y = divAuxData.tmpBasalLoc[1] - oldNucleusCenter.y; true_divDir = divDir.rotateNegativeNintyDeg_XY_CC(); // std::cout<<"true_divDir = "<<true_divDir.x<<" "<<true_divDir.y<<std::endl; // std::cout<<"HAHA ERROR 5"<<std::endl; // double lenAlongHertwigAxis = calLengthAlongHertwigAxis(divDir, oldCellCenter, membrNodes);//A&A added double lenAlongHertwigAxis = calLengthAlongHertwigAxis(true_divDir, oldNucleusCenter, membrNodes); // std::cout<<"HAHA ERROR 6"<<std::endl; std::vector<VecValT> tmp1Membr, tmp2Membr; CVector intCell1Center, intCell2Center; // obtain the center of two cell along the shortest distance between the membrane nodes of mother cell. There is also a tuning factor to shift the centers inside the cell "shiftRatio" // obtainTwoNewIntCenters(oldIntCenter, divDir, lenAlongHertwigAxis, intCell1Center, // intCell2Center); obtainTwoNewIntCenters(oldNucleusCenter, true_divDir, lenAlongHertwigAxis, intCell1Center, intCell2Center); // decide each membrane nodes and internal nodes of mother cell is going to belongs to daugther cell 1 or 2. 
Also shrink the internal nod position along the aixs connecting mother cell to the internal nodes by a factor given as an input in the name of "Shrink ratio" // prepareTmpVec(i, divDir, oldCellCenter, oldIntCenter,tmp1Membr, tmp2Membr); prepareTmpVec(i, divDir, oldNucleusCenter, oldNucleusCenter,tmp1Membr, tmp2Membr); // for (int n = 0; n < tmp1Membr.size(); n++){ // // std::cout<<"tmp1Membr["<<n<<"].val = "<<tmp1Membr[n].val<<", .vec = "<<tmp1Membr[n].vec.x<<" "<<tmp1Membr[n].vec.y<<" "<<tmp1Membr[n].vec.z<<", type = "<<tmp1Membr[n].type<<std::endl; // } // for (int m = 0; m < tmp2Membr.size(); m++){ // // std::cout<<"tmp2Membr["<<m<<"].val = "<<tmp2Membr[m].val<<", .vec = "<<tmp2Membr[m].vec.x<<" "<<tmp2Membr[m].vec.y<<" "<<tmp2Membr[m].vec.z<<", type = "<<tmp2Membr[m].type<<std::endl; // } //create the two new membrane line based on the specified distance. // processMemVec(i, tmp1Membr, tmp2Membr, oldNucleusCenter); processMemVec_Ver2(i, tmp1Membr, tmp2Membr, oldNucleusCenter, cellCenterLine_Basal2Apical, cellCenterLine_Apical2Basal, cellCenterLine_Basal2Apical_leftShift, cellCenterLine_Basal2Apical_rightShift, cellCenterLine_Apical2Basal_leftShift, cellCenterLine_Apical2Basal_rightShift, cellCenterLine_MirrorLength_Basal2Apical,cellCenterLine_MirrorLength_Apical2Basal ); // shift the internal to make sure the center of new daugther cell is exactly similar to what have chosen in the function "obtainTwoNewCenters" shiftIntnlNodesByCellCenter(intCell1Center, intCell2Center); // assemble two new daughter cells information. assembleVecForTwoCells(i); } //divDebug(); } void SceCells::printCellCenterLine_M(int p) { printAuxData.tmp1MemActiveCounts.clear(); printAuxData.tmp1InternalActiveCounts.clear(); printAuxData.tmp2MemActiveCounts.clear(); printAuxData.tmp2InternalActiveCounts.clear(); printAuxData.isMotherCellBehind.clear(); for (uint i = 0; i < printAuxData.toBeDivideCount; i++) { printAuxData.tmp1IntnlVec.clear(); printAuxData.tmp2IntnlVec.clear(); vector<CVector> membrNodes; vector<CVector> intnlNodes; vector<MembraneType1> nodeTypeIndxDiv ; vector<CVector> cellCenterLine_Basal2Apical; vector<CVector> cellCenterLine_Apical2Basal; //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // std::cout<<"HAHA ERROR 1"<<std::endl; // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes,nodeTypeIndxDiv); // Ali obtainMembrAndIntnlNodesPlusNodeType2_printingOnly(i, membrNodes, intnlNodes, nodeTypeIndxDiv, cellCenterLine_Basal2Apical, cellCenterLine_Apical2Basal); for (int j = 0; j < cellCenterLine_Apical2Basal.size(); j++){ std::cout<<"cellRank = "<<p<<" "<<cellCenterLine_Apical2Basal[j].x<<" "<<cellCenterLine_Apical2Basal[j].y<<std::endl; } } } //A&A // void SceCells::findHertwigAxis() { // divAuxData.tmp1MemActiveCounts.clear(); // divAuxData.tmp1InternalActiveCounts.clear(); // divAuxData.tmp2MemActiveCounts.clear(); // divAuxData.tmp2InternalActiveCounts.clear(); // //divDebug(); // for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { // uint cellRank = divAuxData.tmpCellRank_M[i]; // vector<CVector> membrNodes; // vector<CVector> intnlNodes; // vector<MembraneType1> nodeTypeIndxDiv ; // std::pair <int ,int > ringIds ; // //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes,nodeTypeIndxDiv); // Ali // CVector oldCellCenter = obtainCellCenter(i);// cell center // double lenAlongMajorAxis; // //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); // //CVector divDir = 
calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); //Ali // CVector divDir = calDivDir_ApicalBasal(oldCellCenter, membrNodes, // lenAlongMajorAxis,nodeTypeIndxDiv); //Ali // cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; // cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; // ringIds =calApicalBasalRingIds(divDir, oldCellCenter, membrNodes,nodeTypeIndxDiv); //Ali // // it is local membrane id ; // cellInfoVecs.ringApicalId[cellRank]=ringIds.first ; // cellInfoVecs.ringBasalId [cellRank]=ringIds.second ; // std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; // std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; // std::cout<<divDir.x<<"HertwigXdir " <<std::endl; // std::cout<<divDir.y<<"HertwigYdir " <<std::endl; // } // //divDebug(); // }// This is the original code that find HertwigAxis based on cell center //This is the new code that find HertwigAxis based on nucleus center // void SceCells::findHertwigAxis() { // divAuxData.tmp1MemActiveCounts.clear(); // divAuxData.tmp1InternalActiveCounts.clear(); // divAuxData.tmp2MemActiveCounts.clear(); // divAuxData.tmp2InternalActiveCounts.clear(); // //divDebug(); // for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { // uint cellRank = divAuxData.tmpCellRank_M[i]; // vector<CVector> membrNodes; // vector<CVector> intnlNodes; // vector<MembraneType1> nodeTypeIndxDiv ; // std::pair <int ,int > ringIds ; // //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes, nodeTypeIndxDiv); // // CVector oldCellCenter = obtainCellCenter(i);// cell center // CVector oldCellCenter = obtainNucleusCenter(i, intnlNodes);// Kevin // double lenAlongMajorAxis; // //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); // //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); //Ali // CVector divDir = calDivDir_ApicalBasal(oldCellCenter, membrNodes, // lenAlongMajorAxis,nodeTypeIndxDiv); //Ali // cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; // cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; // ringIds =calApicalBasalRingIds(divDir, oldCellCenter, membrNodes,nodeTypeIndxDiv); //Ali // // it is local membrane id ; // cellInfoVecs.ringApicalId[cellRank]=ringIds.first ; // cellInfoVecs.ringBasalId [cellRank]=ringIds.second ; // std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; // std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; // std::cout<<divDir.x<<"HertwigXdir " <<std::endl; // std::cout<<divDir.y<<"HertwigYdir " <<std::endl; // } // //divDebug(); // } // void SceCells::findHertwigAxis_useBasalApicalLoc() { // divAuxData.tmp1MemActiveCounts.clear(); // divAuxData.tmp1InternalActiveCounts.clear(); // divAuxData.tmp2MemActiveCounts.clear(); // divAuxData.tmp2InternalActiveCounts.clear(); // //divDebug(); // for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { // uint cellRank = divAuxData.tmpCellRank_M[i]; // vector<CVector> membrNodes; // vector<CVector> intnlNodes; // vector<MembraneType1> nodeTypeIndxDiv ; // std::pair <int ,int > ringIds ; // //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); // obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes, nodeTypeIndxDiv); // // CVector oldCellCenter = obtainCellCenter(i);// cell center // CVector oldCellCenter = obtainNucleusCenter(i, intnlNodes);// Kevin // double lenAlongMajorAxis; // //CVector divDir = 
calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); // //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // // lenAlongMajorAxis); //Ali // // CVector divDir = calDivDir_ApicalBasal(oldCellCenter, membrNodes, // // lenAlongMajorAxis,nodeTypeIndxDiv); //Ali // CVector divDir; // CVector tmpDivDir; // tmpDivDir.x = cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank]; // tmpDivDir.y = cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank]; // double length = sqrt((cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank])*(cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank]) + // (cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank])*(cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank])); // tmpDivDir.x = tmpDivDir.x/length; // tmpDivDir.y = tmpDivDir.y/length; // divDir = tmpDivDir.rotateNintyDeg_XY_CC(); // cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; // cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; // ringIds =calApicalBasalRingIds(divDir, oldCellCenter, membrNodes,nodeTypeIndxDiv); //Ali // // it is local membrane id ; // cellInfoVecs.ringApicalId[cellRank]=ringIds.first ; // cellInfoVecs.ringBasalId [cellRank]=ringIds.second ; // std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; // std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; // std::cout<<divDir.x<<"HertwigXdir " <<std::endl; // std::cout<<divDir.y<<"HertwigYdir " <<std::endl; // } // //divDebug(); // } void SceCells::findHertwigAxis_useBasalApicalLoc() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); uint isDividingCount = 0; //divDebug(); for (int i = 0; i < cellInfoVecs.isDividing.size(); i++){ if (cellInfoVecs.isDividing[i] == true){ isDividingCount += 1; } } if (isDividingCount > 1){ std::cout<<"More than one cell undergoing division! 
Instability very likely to occur!"<<std::endl; } for (uint i = 0; i < isDividingCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; vector<CVector> membrNodes; vector<CVector> intnlNodes; vector<MembraneType1> nodeTypeIndxDiv ; std::pair <int ,int > ringIds ; //obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); obtainMembrAndIntnlNodesPlusNodeType(i, membrNodes, intnlNodes, nodeTypeIndxDiv); // CVector oldCellCenter = obtainCellCenter(i);// cell center CVector oldCellCenter = obtainNucleusCenter(i, intnlNodes);// Kevin double lenAlongMajorAxis; //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); //Ali // CVector divDir = calDivDir_ApicalBasal(oldCellCenter, membrNodes, // lenAlongMajorAxis,nodeTypeIndxDiv); //Ali CVector divDir; CVector tmpDivDir; tmpDivDir.x = cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank]; tmpDivDir.y = cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank]; double length = sqrt((cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank])*(cellInfoVecs.apicalLocX[cellRank] - cellInfoVecs.basalLocX[cellRank]) + (cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank])*(cellInfoVecs.apicalLocY[cellRank] - cellInfoVecs.basalLocY[cellRank])); tmpDivDir.x = tmpDivDir.x/length; tmpDivDir.y = tmpDivDir.y/length; divDir = tmpDivDir.rotateNintyDeg_XY_CC(); cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; // ringIds =calApicalBasalRingIds(divDir, oldCellCenter, membrNodes,nodeTypeIndxDiv); //Ali // // it is local membrane id ; // cellInfoVecs.ringApicalId[cellRank]=ringIds.first ; // cellInfoVecs.ringBasalId [cellRank]=ringIds.second ; std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; std::cout<<divDir.x<<"HertwigXdir " <<std::endl; std::cout<<divDir.y<<"HertwigYdir " <<std::endl; } //divDebug(); } void SceCells::copyFirstCellArr_M(double quiescence1, double quiescence1_half) { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; //Ali to preserve the neighbors information of each cell for the copySecondCellArr_M function if two neighbor cell divide at eaxctly one time step and the order // of mother and daughter cells are oppposite the methodology won't work. I think it almost never this situation will happen. 
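	// Division bookkeeping split across copyFirstCellArr_M and copySecondCellArr_M: daughter 1 is
	// written back into the mother cell's node slots (cellRank = tmpCellRank_M[i]), while daughter 2
	// is appended at cellRank = currentActiveCellCount + i. The Front/Behind neighbor ranks are copied
	// into the *Old vectors first so that, after the mother's slots have been overwritten, the neighbor
	// on the dividing side can still be looked up and re-pointed at the newly created daughter
	// (see the isMotherCellBehind branch below).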
// cout<<"nodeCellRankFront size = "<<nodes->getInfoVecs().nodeCellRankFront.size()<<std::endl; // cout<<"nodeCellRankFrontOld size = "<<nodes->getInfoVecs().nodeCellRankFrontOld.size()<<std::endl; // cout<<"nodeCellRankBehind size = "<<nodes->getInfoVecs().nodeCellRankBehind.size()<<std::endl; // cout<<"nodeCellRankBehindOld size = "<<nodes->getInfoVecs().nodeCellRankBehindOld.size()<<std::endl; thrust::copy (nodes->getInfoVecs().nodeCellRankFront.begin(),nodes->getInfoVecs().nodeCellRankFront.begin()+allocPara_m.currentActiveCellCount, nodes->getInfoVecs().nodeCellRankFrontOld.begin()) ; thrust::copy (nodes->getInfoVecs().nodeCellRankBehind.begin(),nodes->getInfoVecs().nodeCellRankBehind.begin()+allocPara_m.currentActiveCellCount, nodes->getInfoVecs().nodeCellRankBehindOld.begin()) ; cout << "Number of cells ready to divide in this time step is " <<divAuxData.toBeDivideCount << endl ; if (divAuxData.toBeDivideCount>1) { cout << "Warnining: at Least two cells divided at the same time step chance of error in finding next neighbor of each cell"<< endl ; } for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; std::cout<<"cellRank undergoing division = "<<cellRank<<std::endl; uint cellRankDaughter = allocPara_m.currentActiveCellCount + i; //Ali //WE WILL UPDATE THE NUCLEUSLOCPERCENTAGE HERE! cellInfoVecs.nucleusLocPercent[cellRankDaughter] = cellInfoVecs.nucleusLocPercent[cellRank]; std::cout<<"nucleusLocPercent Mother["<<cellRank<<"] = "<<cellInfoVecs.nucleusLocPercent[cellRank]<<std::endl; std::cout<<"nucleusLocPercent Daughter["<<cellRankDaughter<<"] = "<<cellInfoVecs.nucleusLocPercent[cellRankDaughter]<<std::endl; // std::cout<<"cellRankDaughter = "<<cellRankDaughter<<std::endl; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; // std::cout<<"nodeStartIndx = "<<nodeStartIndx<<std::endl; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; // std::cout<<"tmpStartIndx = "<<tmpStartIndx<<std::endl; // std::cout<<"tmpEndIndx = "<<tmpEndIndx<<std::endl; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); // for (int index = 0; index < divAuxData.tmpNodeType1.size(); index++){ // std::cout<<"divAuxData.tmpNodeType1["<<index<<"] = "<<divAuxData.tmpNodeType1[index]<<std::endl; // } thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2,divAuxData.tmpNodeType1.begin())) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2, divAuxData.tmpNodeType1.begin())) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin(), nodes->getInfoVecs().memNodeType1.begin() )) // the 1 in memNodeType1 is not representing cell number 1 but in the rest it represents + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp1InternalActiveCounts[i]; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"CellRank = "<<cellRank<<"activeIntnlNodeCounts = "<<cellInfoVecs.activeIntnlNodeCounts[cellRank]<<std::endl; // } cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; // if (cellRank 
== 31 || cellRank == 86){ // std::cout<<"CellRank = "<<cellRank<<"activeMembrNodeCounts = "<<cellInfoVecs.activeMembrNodeCounts[cellRank]<<std::endl; // } double leftOrRight = cellInfoVecs.centerCoordX[31]*cellInfoVecs.centerCoordY[cellRank] - cellInfoVecs.centerCoordY[31]*cellInfoVecs.centerCoordX[cellRank]; if (leftOrRight >= 0){ cellInfoVecs.growthProgress[cellRank] = quiescence1_half; //quiescence1*0.5; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } else{ cellInfoVecs.growthProgress[cellRank] = quiescence1; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ nodes->getInfoVecs().quiescencePerNode[i] = cellInfoVecs.growthProgress[cellRank]; } cellInfoVecs.cellAreaGrowthProgress[cellRank] = 0; cellInfoVecs.cellAreaGrowthProgressNonMitotic[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isEnteringMitotic[cellRank] = false; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.distFromBasalLoc[cellRank] = 0.0; cellInfoVecs.distFromApicalLoc[cellRank] = 0.0; cellInfoVecs.daughterCellProduced[cellRank] += 1; // std::cout<<"divAuxData.isMotherCellBehind["<<i<<"] = "<<divAuxData.isMotherCellBehind[i]<<std::endl; //Ali // if (divAuxData.isMotherCellBehind[i]) { if (divAuxData.isMotherCellBehind[i]==true) { //nodes->getInfoVecs().nodeCellRankBehindNeighb[cellRank] =nodes->getInfoVecs().nodeCellRankBehindNeighb[cellRank] ; //as before so no need to update nodes->getInfoVecs().nodeCellRankFront[cellRank] =cellRankDaughter ; int tmpCellRankFront=nodes->getInfoVecs().nodeCellRankFrontOld[cellRank] ; nodes->getInfoVecs().nodeCellRankBehind[tmpCellRankFront] =cellRankDaughter ; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"copy first cell array, cellRank = "<<cellRank<<" , "<<"tmpCellRankFront = "<<tmpCellRankFront<<std::endl; // std::cout<<"1nodeCellRankBehind["<<tmpCellRankFront<<"] = "<<nodes->getInfoVecs().nodeCellRankBehind[tmpCellRankFront]<<std::endl; // for (int j = nodeStartIndx; j < nodeStartIndx+(tmpEndIndx-tmpStartIndx); j++){ // std::cout<<nodes->getInfoVecs().nodeLocX[j]<<" "<<nodes->getInfoVecs().nodeLocY[j]<<" "<<nodes->getInfoVecs().memNodeType1[j]<<std::endl; // } // } } else { nodes->getInfoVecs().nodeCellRankBehind[cellRank] =cellRankDaughter ; // nodes->getInfoVecs().nodeCellRankFrontNeighb[cellRank] = nodes->getInfoVecs().nodeCellRankFrontNeighb[cellRank]; //as before so no need to update int tmpCellRankBehind=nodes->getInfoVecs().nodeCellRankBehindOld[cellRank] ; nodes->getInfoVecs().nodeCellRankFront[tmpCellRankBehind] =cellRankDaughter ; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"cellRank = "<<cellRank<<" , "<<"tmpCellRankBehind = "<<tmpCellRankBehind<<std::endl; // std::cout<<"2nodeCellRankFront["<<tmpCellRankBehind<<"] = "<<nodes->getInfoVecs().nodeCellRankFront[tmpCellRankBehind]<<std::endl; // for (int j = nodeStartIndx; j < nodeStartIndx+(tmpEndIndx-tmpStartIndx); j++){ // std::cout<<nodes->getInfoVecs().nodeLocX[j]<<" "<<nodes->getInfoVecs().nodeLocY[j]<<" "<<nodes->getInfoVecs().memNodeType1[j]<<std::endl; // } // } } cellInfoVecs.numApicalVec[cellRank] = 0; cellInfoVecs.numBasalVec[cellRank] = 0; for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ if 
(nodes->getInfoVecs().memNodeType1[i] == apical1){ cellInfoVecs.numApicalVec[cellRank] += 1; } if (nodes->getInfoVecs().memNodeType1[i] == basal1){ cellInfoVecs.numBasalVec[cellRank] += 1; } } cellInfoVecs.cellRankVec[cellRank] = cellRank; std::cout<<"Cell["<<cellRank<<"] has "<<cellInfoVecs.numApicalVec[cellRank]<<" apical nodes and "<<cellInfoVecs.numBasalVec[cellRank]<<" basal nodes initially post division"<<std::endl; // if (cellRank == 31 || cellRank == 86){ // for (int k = 0; k < maxAllNodePerCell; k++){ // std::cout<<"cellRank = "<<cellRank<<std::endl; // std::cout<<"nodes->getInfoVecs().memNodeType1["<<k+nodeStartIndx<<"] = "<<nodes->getInfoVecs().memNodeType1[k+nodeStartIndx]<<std::endl; // } // } } } void SceCells::copySecondCellArr_M(double quiescence2, double quiescence2_half) { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { int cellRankMother=divAuxData.tmpCellRank_M[i] ; //Ali divAuxData.cellRankMother = divAuxData.tmpCellRank_M[i]; // std::cout<<"cellRankMother = "<<cellRankMother<<std::endl; uint cellRank = allocPara_m.currentActiveCellCount + i; divAuxData.cellRankDaughter = cellRank; // std::cout<<"cellRank = "<<cellRank<<std::endl; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; // std::cout<<"tmpStartIndx for tmpNodeType2 = "<<tmpStartIndx<<std::endl; // std::cout<<"tmpEndIndx for tmpNodeType2 = "<<tmpEndIndx<<std::endl; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); // for (int index = 0; index < divAuxData.tmpNodeType2.size(); index++){ // std::cout<<"divAuxData.tmpNodeType2["<<index<<"] = "<<divAuxData.tmpNodeType2[index]<<std::endl; // } thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2,divAuxData.tmpNodeType2.begin() )) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2,divAuxData.tmpNodeType2.begin())) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin(), nodes->getInfoVecs().memNodeType1.begin())) // 1 is not representing cell 1 + nodeStartIndx); uint cellRankToCopyFrom = cellRankMother * maxAllNodePerCell+ allocPara_m.bdryNodeCount; double actomyoMultipMother = cellInfoVecs.contractActomyo_multip_perCell[cellRankMother]; double actomyoMultipMother_apical = cellInfoVecs.contractActomyo_multip_apical_perCell[cellRankMother]; cellInfoVecs.contractActomyo_multip_perCell[cellRank] = actomyoMultipMother; cellInfoVecs.contractActomyo_multip_apical_perCell[cellRank] = actomyoMultipMother_apical; for (int j = nodeStartIndx; j < nodeStartIndx+maxAllNodePerCell; j++){ nodes->getInfoVecs().contractActomyo_multip[j] = actomyoMultipMother;//nodes->getInfoVecs().contractActomyo_multip[cellRankToCopyFrom]; nodes->getInfoVecs().contractActomyo_multip_apical[j] = actomyoMultipMother_apical;//nodes->getInfoVecs().contractActomyo_multip_apical[cellRankToCopyFrom]; if (j==nodeStartIndx){ std::cout<<"contractActomyo_multip for cellRank = "<<cellRank<<" is "<<nodes->getInfoVecs().contractActomyo_multip[j]<<", 
inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_apical for cellRank = "<<cellRank<<" is "<<nodes->getInfoVecs().contractActomyo_multip_apical[j]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_perCell for cellRank = "<<cellRank<<" is "<<cellInfoVecs.contractActomyo_multip_perCell[cellRank]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_apical_perCell for cellRank = "<<cellRank<<" is "<<cellInfoVecs.contractActomyo_multip_apical_perCell[cellRank]<<", inherited from cellRank = "<<cellRankMother<<std::endl; } if (j==nodeStartIndx+maxAllNodePerCell-1){ std::cout<<"contractActomyo_multip for cellRank = "<<cellRank<<" is "<<nodes->getInfoVecs().contractActomyo_multip[j]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_apical for cellRank = "<<cellRank<<" is "<<nodes->getInfoVecs().contractActomyo_multip_apical[j]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_perCell for cellRank = "<<cellRank<<" is "<<cellInfoVecs.contractActomyo_multip_perCell[cellRank]<<", inherited from cellRank = "<<cellRankMother<<std::endl; std::cout<<"contractActomyo_multip_apical_perCell for cellRank = "<<cellRank<<" is "<<cellInfoVecs.contractActomyo_multip_apical_perCell[cellRank]<<", inherited from cellRank = "<<cellRankMother<<std::endl; } } cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp2InternalActiveCounts[i]; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"CellRank = "<<cellRank<<"activeIntnlNodeCounts = "<<cellInfoVecs.activeIntnlNodeCounts[cellRank]<<std::endl;; // } cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp2MemActiveCounts[i]; // if (cellRank == 31 || cellRank == 86){ // std::cout<<"CellRank = "<<cellRank<<"activeMembrNodeCounts = "<<cellInfoVecs.activeMembrNodeCounts[cellRank]<<std::endl;; // } double leftOrRight = cellInfoVecs.centerCoordX[31]*cellInfoVecs.centerCoordY[cellRankMother] - cellInfoVecs.centerCoordY[31]*cellInfoVecs.centerCoordX[cellRankMother]; if (leftOrRight >= 0){ cellInfoVecs.growthProgress[cellRank] = quiescence2_half;//quiescence2*0.5; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } else{ cellInfoVecs.growthProgress[cellRank] = quiescence2; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } // cellInfoVecs.growthProgress[cellRank] = quiescence2; for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ nodes->getInfoVecs().quiescencePerNode[i] = cellInfoVecs.growthProgress[cellRank]; } cellInfoVecs.membrGrowProgress[cellRank] = 0; cellInfoVecs.cellAreaGrowthProgress[cellRank] = 0; cellInfoVecs.cellAreaGrowthProgressNonMitotic[cellRank] = 0; cellInfoVecs.isEnteringMitotic[cellRank] = false; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.cellRoot[cellRank] = cellInfoVecs.cellRoot[cellRankMother]; //Ali cellInfoVecs.eCellTypeV2[cellRank] = cellInfoVecs.eCellTypeV2[cellRankMother]; //Ali cellInfoVecs.distFromNucleus_normal[cellRank] = cellInfoVecs.distFromNucleus_normal[cellRankMother]; cellInfoVecs.distFromNucleus_normal_apical[cellRank] = cellInfoVecs.distFromNucleus_normal_apical[cellRankMother]; cellInfoVecs.distFromBasalLoc[cellRank] = 0.0; cellInfoVecs.distFromApicalLoc[cellRank] = 0.0; 
std::cout<<"New Cell "<<cellRank<<" introduced!"<<std::endl; std::cout<<"distFromNucleus_normal : "<<cellInfoVecs.distFromNucleus_normal[cellRank]<<std::endl; std::cout<<"distFromNucleus_normal_apical : "<<cellInfoVecs.distFromNucleus_normal_apical[cellRank]<<std::endl; std::cout<<"isEnteringMitotic[cellRank] : "<<cellInfoVecs.isEnteringMitotic[cellRank]<<std::endl; //Ali // std::cout<<"divAuxData.isMotherCellBehind["<<i<<"] = "<<divAuxData.isMotherCellBehind[i]<<std::endl; if (divAuxData.isMotherCellBehind[i]==true) { nodes->getInfoVecs().nodeCellRankBehind[cellRank] =cellRankMother ; nodes->getInfoVecs().nodeCellRankFront[cellRank] =nodes->getInfoVecs().nodeCellRankFrontOld[cellRankMother]; } else { nodes->getInfoVecs().nodeCellRankBehind[cellRank] =nodes->getInfoVecs().nodeCellRankBehindOld[cellRankMother]; nodes->getInfoVecs().nodeCellRankFront[cellRank] =cellRankMother ; } cellInfoVecs.numApicalVec[cellRank] = 0; cellInfoVecs.numBasalVec[cellRank] = 0; for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ if (nodes->getInfoVecs().memNodeType1[i] == apical1){ cellInfoVecs.numApicalVec[cellRank] += 1; } if (nodes->getInfoVecs().memNodeType1[i] == basal1){ cellInfoVecs.numBasalVec[cellRank] += 1; } } cellInfoVecs.cellRankVec[cellRank] = cellRank; std::cout<<"Cell["<<cellRank<<"] has "<<cellInfoVecs.numApicalVec[cellRank]<<" apical nodes and "<<cellInfoVecs.numBasalVec[cellRank]<<" basal nodes initially post division"<<std::endl; } } //AAMIRI /* void SceCells::removeCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp1InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; } } */ void SceCells::updateActiveCellCount_M() { allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount + divAuxData.toBeDivideCount; nodes->setActiveCellCount(allocPara_m.currentActiveCellCount); } //AAMIRI /* void SceCells::updateActiveCellCountAfterRemoval_M() { allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount + divAuxData.toBeDivideCount; nodes->setActiveCellCount(allocPara_m.currentActiveCellCount); } */ void SceCells::markIsDivideFalse_M() { // for (int i = 0; i < cellInfoVecs.isDividing.size(); i++){ // if (cellInfoVecs.isDividing[i] == 
true){ // cellInfoVecs.growthProgress[i] = 0.0; // cellInfoVecs.cellAreaGrowthProgress[i] = 0.0; // cellInfoVecs.cellAreaGrowthProgressNonMitotic[i] = 0.0; // cellInfoVecs.isEnteringMitotic[i] = false; // } // } thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, false); } void SceCells::adjustNodeVel_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + allocPara_m.bdryNodeCount + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), ForceZero()); } void SceCells::moveNodes_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), //Ali SaxpyFunctorDim2(dt)); SaxpyFunctorDim2_Damp(dt,Damp_Coef)); //Ali } //Ali // This function is written to assigned different damping coefficients to cells, therefore the boundary cells can have more damping void SceCells::moveNodes_BC_M() { thrust::counting_iterator<uint> iBegin2(0); uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.Cell_Damp.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.Cell_Damp.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2_BC_Damp(dt)); } //Ali void SceCells::ApplyExtForces() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; //for (int i=0 ; i <nodes->getInfoVecs().memNodeType1.size(); i++ ) { // if (nodes->getInfoVecs().memNodeType1[i]==basal1) { // cout << " I am a basal node with id="<< i << " and vx before applying external force is equal to " <<nodes->getInfoVecs().nodeVelX[i] << endl ; // } //} thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( 
nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceY.begin())), AddExtForces(curTime)); //for (int i=0 ; i <nodes->getInfoVecs().memNodeType1.size(); i++ ) { // if (nodes->getInfoVecs().memNodeType1[i]==basal1) { // cout << " I am a basal node with id="<< i << " and vx is equal to " <<nodes->getInfoVecs().nodeVelX[i] << endl ; // } //} } void SceCells::applyMemForce_M(bool cellPolar,bool subCellPolar) { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0), iBegin2(0) ; //Ali thrust::fill(cellInfoVecs.Cell_Time.begin(),cellInfoVecs.Cell_Time.begin() +allocPara_m.currentActiveCellCount,curTime); //Ali thrust::device_vector<double>::iterator MinY_Itr_Cell=thrust::min_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxY_Itr_Cell=thrust::max_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; double minY_Cell= *MinY_Itr_Cell ; //This variable doesn't seemed to be used even when passed into functions //Kevin double maxY_Cell= *MaxY_Itr_Cell ; //This variable doesn't seemed to be used even when passed into functions //Kevin double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); int* nodeAdhereIndexAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[0])); //assuming that number of boundary nodes are equal to zero int* cellRootAddr = thrust::raw_pointer_cast( &(cellInfoVecs.cellRoot[0])); // Ali // if (curTime>10.05) { // for (int i=0; i<nodes->getInfoVecs().nodeAdhereIndex.size(); i++) { // cout<<"node adhere index"<<i+allocPara_m.bdryNodeCount<<" is" <<nodes->getInfoVecs().nodeAdhereIndex[i]<<endl ; // } // exit (EXIT_FAILURE) ; // } //double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.eCellTypeV2.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().isSubApicalJunction.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin2, ModuloFunctor(maxAllNodePerCell)))), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.eCellTypeV2.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( 
cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().isSubApicalJunction.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin2, ModuloFunctor(maxAllNodePerCell)))) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeActinLevel.begin(), ActinLevelCal(maxAllNodePerCell,nodeIsActiveAddr,cellRootAddr,minY_Cell,maxY_Cell,cellPolar,subCellPolar)); //double a ; //for(int i=0 ; i<totalNodeCountForActiveCells ; i++) { // a=static_cast<double>(nodes->getInfoVecs().nodeAdhereIndex[i]-i); // cout<< "adhere index of node " << i << " is " << nodes->getInfoVecs().nodeAdhereIndex[i] << endl ; // cout<< "the normalized difference is" <<a/(2.0*680) <<"the difference is " << a << "2 time max node per cell is " << 2*maxAllNodePerCell << endl ; // } double* nodeActinLevelAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeActinLevel[0])); //assuming that number of boundary nodes are equal to zero thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeAdhereIndex.begin() + allocPara_m.bdryNodeCount, make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeAdhereIndex.begin() + allocPara_m.bdryNodeCount, make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().membrTenMagRi.begin(), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrBendLeftX.begin(), nodes->getInfoVecs().membrBendLeftY.begin(), nodes->getInfoVecs().membrBendRightX.begin(), nodes->getInfoVecs().membrBendRightY.begin())) + allocPara_m.bdryNodeCount, 
AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, nodeAdhereIndexAddr,nodeActinLevelAddr, grthPrgrCriVal_M,minY_Cell,maxY_Cell)); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrLinSpringEnergy.begin(), nodes->getInfoVecs().membrBendSpringEnergy.begin())), CalMembrEnergy(maxAllNodePerCell,nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr,nodeActinLevelAddr, grthPrgrCriVal_M)); energyCell.totalMembrLinSpringEnergyCell=0.5 *(thrust::reduce ( nodes->getInfoVecs().membrLinSpringEnergy.begin(), nodes->getInfoVecs().membrLinSpringEnergy.begin()+totalNodeCountForActiveCells, (double)0.0, thrust::plus<double>() )); energyCell.totalMembrBendSpringEnergyCell=thrust::reduce ( nodes->getInfoVecs().membrBendSpringEnergy.begin(), nodes->getInfoVecs().membrBendSpringEnergy.begin()+totalNodeCountForActiveCells, (double)0.0, thrust::plus<double>() ); energyCell.totalNodeIIEnergyCell=0.5*(thrust::reduce ( nodes->getInfoVecs().nodeIIEnergy.begin(), nodes->getInfoVecs().nodeIIEnergy.begin()+totalNodeCountForActiveCells, (double)0.0, thrust::plus<double>() )); energyCell.totalNodeIMEnergyCell=0.5*(thrust::reduce ( nodes->getInfoVecs().nodeIMEnergy.begin(), nodes->getInfoVecs().nodeIMEnergy.begin()+totalNodeCountForActiveCells, (double)0.0, thrust::plus<double>() )); energyCell.totalNodeEnergyCellOld=energyCell.totalNodeEnergyCell ; energyCell.totalNodeEnergyCell=energyCell.totalMembrLinSpringEnergyCell + energyCell.totalMembrBendSpringEnergyCell + energyCell.totalNodeIIEnergyCell + energyCell.totalNodeIMEnergyCell ; int timeStep=curTime/dt ; if ( (timeStep % 10000)==0 ) { string uniqueSymbolOutput = globalConfigVars.getConfigValue("UniqueSymbol").toString(); std::string cSVFileName = "EnergyExportCell_" + uniqueSymbolOutput + ".CSV"; ofstream EnergyExportCell ; EnergyExportCell.open(cSVFileName.c_str(),ofstream::app); EnergyExportCell <<curTime<<","<<energyCell.totalMembrLinSpringEnergyCell << "," <<energyCell.totalMembrBendSpringEnergyCell << "," <<energyCell.totalNodeIIEnergyCell<<"," <<energyCell.totalNodeIMEnergyCell<<", "<< energyCell.totalNodeEnergyCell <<std::endl; } double* bendLeftXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftX[0])); double* bendLeftYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftY[0])); double* bendRightXAddr = thrust::raw_pointer_cast( 
&(nodes->getInfoVecs().membrBendRightX[0])); double* bendRightYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendRightY[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), AddMembrBend(maxAllNodePerCell, nodeIsActiveAddr, bendLeftXAddr, bendLeftYAddr, bendRightXAddr, bendRightYAddr)); } //AAMIRI void SceCells::findTangentAndNormal_M() { uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeInterCellForceX.begin(), nodes->getInfoVecs().nodeInterCellForceY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeInterCellForceX.begin(), nodes->getInfoVecs().nodeInterCellForceY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeF_MI_M_T.begin(), nodes->getInfoVecs().nodeF_MI_M_N.begin(), //Absoulte value since we know it is always repulsion. 
only it is used for output data nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeInterCellForceTangent.begin(), nodes->getInfoVecs().nodeInterCellForceNormal.begin(), // Absolute value to be consittent only it is used for output data nodes->getInfoVecs().membrDistToRi.begin())), CalCurvatures(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)); } void SceCells::runAblationTest(AblationEvent& ablEvent) { for (uint i = 0; i < ablEvent.ablationCells.size(); i++) { int cellRank = ablEvent.ablationCells[i].cellNum; std::vector<uint> removeSeq = ablEvent.ablationCells[i].nodeNums; cellInfoVecs.activeNodeCountOfThisCell[cellRank] = cellInfoVecs.activeNodeCountOfThisCell[cellRank] - removeSeq.size(); nodes->removeNodes(cellRank, removeSeq); } } void SceCells::computeInternalAvgPos_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); //uint totalMembrActiveNodeCount = thrust::reduce( // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.activeMembrNodeCounts.begin() // + allocPara_m.currentActiveCellCount); uint totalIntnlActiveNodeCount = thrust::reduce( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin())) + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), ActiveAndIntnl()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), thrust::equal_to<uint>(), CVec2Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.activeIntnlNodeCounts.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin())), CVec2Divide()); } void SceCells::computeInternalMaxMinPos_M() { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // thrust::counting_iterator<uint> iBegin(0); // thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); // //uint totalMembrActiveNodeCount = 
thrust::reduce( // // cellInfoVecs.activeMembrNodeCounts.begin(), // // cellInfoVecs.activeMembrNodeCounts.begin() // // + allocPara_m.currentActiveCellCount); // uint totalIntnlActiveNodeCount = thrust::reduce( // cellInfoVecs.activeIntnlNodeCounts.begin(), // cellInfoVecs.activeIntnlNodeCounts.begin() // + allocPara_m.currentActiveCellCount); // thrust::copy_if( // thrust::make_zip_iterator( // thrust::make_tuple( // make_transform_iterator(iBegin, // DivideFunctor( // allocPara_m.maxAllNodePerCell)), // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount)), // thrust::make_zip_iterator( // thrust::make_tuple( // make_transform_iterator(iBegin, // DivideFunctor( // allocPara_m.maxAllNodePerCell)), // nodes->getInfoVecs().nodeLocX.begin() // + allocPara_m.bdryNodeCount, // nodes->getInfoVecs().nodeLocY.begin() // + allocPara_m.bdryNodeCount)) // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeCellType.begin())) // + allocPara_m.bdryNodeCount, // thrust::make_zip_iterator( // thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), // cellNodeInfoVecs.activeXPoss.begin(), // cellNodeInfoVecs.activeYPoss.begin())), // ActiveAndIntnl()); // thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), // cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount, // thrust::make_zip_iterator( // thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), // cellNodeInfoVecs.activeYPoss.begin())), // cellInfoVecs.cellRanksTmpStorage.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), // cellInfoVecs.InternalAvgY.begin())), // thrust::equal_to<uint>(), CVec2Add()); // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), // cellInfoVecs.InternalAvgY.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), // cellInfoVecs.InternalAvgY.begin())) // + allocPara_m.currentActiveCellCount, // cellInfoVecs.activeIntnlNodeCounts.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(), // cellInfoVecs.InternalAvgY.begin())), CVec2Divide()); } void SceCells::applyVolumeConstraint(double timeRatio, double timeRatio_Crit_Division, double volume_Increase_Target_Ratio, double volume_Increase_Scale, double postDivision_restorationRateScale, int cycle) { calCellArea(); // for (int k = 0; k < cellInfoVecs.cellAreaVec.size(); k++){ // if (cellInfoVecs.cellAreaVec[k] < 0){ // cellInfoVecs.cellAreaVec[k] = -1.0*cellInfoVecs.cellAreaVec[k]; // } // } // !!!!!NOTE!!!!!! this is currently an ad hoc way to make sure area of each triangle is positive. 
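// [Added illustration; not from the original code] The commented-out loop above would flip the sign
// of any negative entry in cellAreaVec. Negative areas can appear if calCellArea() accumulates
// signed (shoelace-style) triangle areas -- an assumption about its implementation -- because a
// clockwise vertex ordering makes each term negative. A minimal sketch of the signed quantity in
// question:
//
//   // signed area of the triangle (cx,cy)-(x1,y1)-(x2,y2); negative when the vertices run clockwise
//   static inline double signedTriArea(double cx, double cy, double x1, double y1,
//                                      double x2, double y2) {
//       return 0.5 * ((x1 - cx) * (y2 - cy) - (x2 - cx) * (y1 - cy));
//   }
//
// Taking fabs() of the per-cell total, as the commented-out loop does, is one ad hoc way to keep the
// volume constraint well defined regardless of node ordering.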
if (timeRatio == timeRatio_Crit_Division || timeRatio == timeRatio_Crit_Division+0.2){ std::cout<<"Current timeRatio = "<<timeRatio<<std::endl; for (int k = 0; k < cellInfoVecs.cellAreaVec.size(); k++){ std::cout<<"Cell["<<k<<"] area = "<<cellInfoVecs.cellAreaVec[k]<<std::endl; std::cout<<"CellCenter = "<<cellInfoVecs.centerCoordX[k]<<" "<<cellInfoVecs.centerCoordY[k]<<std::endl; } } if (timeRatio > timeRatio_Crit_Division && nodes->isCellAreaDisplayed==false){ for (int k = 0; k < cellInfoVecs.cellAreaVec.size(); k++){ // if (k == 31 || k == 86){ // std::cout<<"Cell["<<k<<"] area = "<<cellInfoVecs.cellAreaVec[k]<<std::endl; // std::cout<<"CellCenter = "<<cellInfoVecs.centerCoordX[k]<<" "<<cellInfoVecs.centerCoordY[k]<<std::endl; // } } nodes->isCellAreaDisplayed=true; } // computeLagrangeForces(); // if (timeRatio >= timeRatio_Crit_Division){ // double cellAreaDesire=(cellInfoVecs.cellAreaVec[divAuxData.cellRank_division]) + (65 - cellInfoVecs.cellAreaVec[divAuxData.cellRank_division])*postDivision_restorationRateScale*(timeRatio - timeRatio_Crit_Division )/(1.0 - timeRatio_Crit_Division); // double cellAreaDesire2=(cellInfoVecs.cellAreaVec[divAuxData.cellRank_division2]) + (65 - cellInfoVecs.cellAreaVec[divAuxData.cellRank_division2])*postDivision_restorationRateScale*(timeRatio - timeRatio_Crit_Division )/(1.0 - timeRatio_Crit_Division); // std::cout<<"cellAreaCurrent["<<divAuxData.cellRank_division<<"] = "<<cellInfoVecs.cellAreaVec[divAuxData.cellRank_division]<<" , "<<"cellAreaCurrent["<<divAuxData.cellRank_division2<<"] = "<<cellInfoVecs.cellAreaVec[divAuxData.cellRank_division2]<<std::endl; // std::cout<<"cellAreaDesire["<<divAuxData.cellRank_division<<"] = "<<cellAreaDesire<<" , "<<"cellAreaDesire["<<divAuxData.cellRank_division2<<"] = "<<cellAreaDesire2<<std::endl; // } computeLagrangeForces(timeRatio, volume_Increase_Target_Ratio); } void SceCells::computeCenterPos_M2() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); uint totalMembrActiveNodeCount = thrust::reduce( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeMembrNodeCounts.begin() + allocPara_m.currentActiveCellCount); //uint totalIntnlActiveNodeCount = thrust::reduce( // cellInfoVecs.activeIntnlNodeCounts.begin(), // cellInfoVecs.activeIntnlNodeCounts.begin() // + allocPara_m.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin())) + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), ActiveAndMembr()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalMembrActiveNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), 
cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::equal_to<uint>(), CVec2Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.activeMembrNodeCounts.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), CVec2Divide()); /* for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { cout << "for cell rank "<<i<< " cell center in X direction is " << cellInfoVecs.centerCoordX[i] << endl ; cout << "for cell rank "<<i<< " cell center in Y direction is " << cellInfoVecs.centerCoordY[i] << endl ; } */ } void SceCells::computeLagrangeForces(double timeRatio, double volume_Increase_Target_Ratio) { uint maxMembrNode = allocPara_m.maxMembrNodePerCell; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0) ; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double* cellAreaVecAddr= thrust::raw_pointer_cast( &(cellInfoVecs.cellAreaVec[0])); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; ECellType* eCellTypeV2Addr= thrust::raw_pointer_cast( &(cellInfoVecs.eCellTypeV2[0])); bool* isEnteringMitotic = thrust::raw_pointer_cast( &(cellInfoVecs.isEnteringMitotic[0])); double* cellAreaGrowthProgress = thrust::raw_pointer_cast( &(cellInfoVecs.cellAreaGrowthProgress[0])); double* cellAreaGrowthProgressNonMitotic = thrust::raw_pointer_cast( &(cellInfoVecs.cellAreaGrowthProgressNonMitotic[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( 
cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().lagrangeFX.begin(), nodes->getInfoVecs().lagrangeFY.begin(), nodes->getInfoVecs().lagrangeFN.begin())), AddLagrangeForces(maxAllNodePerCell,nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr,cellAreaVecAddr,grthPrgrCriVal_M, eCellTypeV2Addr, timeRatio, volume_Increase_Target_Ratio, isEnteringMitotic, cellAreaGrowthProgress, cellAreaGrowthProgressNonMitotic)); uint maxNPerCell = allocPara_m.maxAllNodePerCell; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin2(0); thrust::reduce_by_key( make_transform_iterator(iBegin2, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin2, DivideFunctor(maxNPerCell))+ totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().lagrangeFX.begin(), nodes->getInfoVecs().lagrangeFY.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.sumLagrangeFPerCellX.begin(), cellInfoVecs.sumLagrangeFPerCellY.begin())), thrust::equal_to<uint>(), CVec2Add()); /* for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { cout << "for cell rank "<<i<< " the summation of lagrangian force in X direction is " << cellInfoVecs.sumLagrangeFPerCellX[i] << endl ; cout << "for cell rank "<<i<< " the summation of lagrangian force in Y direction is " << cellInfoVecs.sumLagrangeFPerCellY[i] << endl ; } */ } void SceCells::computeContractileRingForces() { uint maxMembrNode = allocPara_m.maxMembrNodePerCell; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0) ; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.ringApicalId.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.ringBasalId.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.ringApicalId.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), 
thrust::make_permutation_iterator( cellInfoVecs.ringBasalId.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), AddContractileRingForces(maxAllNodePerCell,nodeLocXAddr, nodeLocYAddr, grthPrgrCriVal_M)); } void SceCells::BC_Imp_M() { thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; double MinX= *MinX_Itr ; double MaxX= *MaxX_Itr ; double MinY= *MinY_Itr ; double MaxY= *MaxY_Itr ; /** thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin()) ), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), BC_Tissue_Damp(Damp_Coef)) ; **/ int NumActCells=allocPara_m.currentActiveCellCount ; //Ali thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.Cell_Damp.begin()) ), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.Cell_Damp.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.Cell_Damp.begin())), BC_Tissue_Damp(MinX,MaxX,MinY,MaxY,Damp_Coef,NumActCells)) ; /**void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } **/ } void SceCells::assignMemNodeType() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = 
allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin2(0) ; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().memNodeType1.begin(), make_transform_iterator(iBegin2,ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))))), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().memNodeType1.begin(), make_transform_iterator(iBegin2,ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))))) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().memNodeType1.begin(), nodes->getInfoVecs().nodeIsApicalMem.begin(), nodes->getInfoVecs().nodeIsBasalMem.begin())) ,AssignMemNodeType()); } // This function is written with the assumption that there is at least one basal point for each cell. void SceCells::computeBasalLoc() { uint maxNPerCell = allocPara_m.maxAllNodePerCell; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); int* basalNodeCountAddr = thrust::raw_pointer_cast( &(cellInfoVecs.basalNodeCount[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsBasalMem.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.basalNodeCount.begin(), thrust::equal_to<uint>(), thrust::plus<int>()); uint totalBasalNodeCount = thrust::reduce( cellInfoVecs.basalNodeCount.begin(), cellInfoVecs.basalNodeCount.begin() + allocPara_m.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().memNodeType1.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeLocXBasal.begin(), cellNodeInfoVecs.activeLocYBasal.begin())), ActiveAndBasal()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalBasalNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeLocXBasal.begin(), cellNodeInfoVecs.activeLocYBasal.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocY.begin())), thrust::equal_to<uint>(), CVec2Add()); // up to here basaLocX and basalLocY are the summation. 
We divide them // thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocY.begin(), cellInfoVecs.cellRanksTmpStorage.begin() )), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocY.begin(), cellInfoVecs.cellRanksTmpStorage.begin() )) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocY.begin())), BasalLocCal(basalNodeCountAddr)); } void SceCells::computeApicalLoc(double timeRatio, double timeRatio_Crit_Division) { uint maxNPerCell = allocPara_m.maxAllNodePerCell; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; // std::cout<<"AHAHA 1"<<std::endl; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); // int* apicalNodeCountAddr = thrust::raw_pointer_cast( // &(cellInfoVecs.apicalNodeCount[0])); // std::cout<<"AHAHA 2"<<std::endl; thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsApicalMem.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.apicalNodeCount.begin(), thrust::equal_to<uint>(), thrust::plus<int>()); int* apicalNodeCountAddr = thrust::raw_pointer_cast( &(cellInfoVecs.apicalNodeCount[0])); // std::cout<<"AHAHA 2.5"<<std::endl; int sizeApical=cellInfoVecs.apicalNodeCount.size() ; // std::cout<<"AHAHA 3"<<std::endl; uint totalApicalNodeCount = thrust::reduce( cellInfoVecs.apicalNodeCount.begin(), cellInfoVecs.apicalNodeCount.begin() + allocPara_m.currentActiveCellCount); // std::cout<<"AHAHA 4"<<std::endl; thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().memNodeType1.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeLocXApical.begin(), cellNodeInfoVecs.activeLocYApical.begin())), ActiveAndApical()); //for (int i=sizeApical-40 ; i<sizeApical ; i++) { // cout << " the location of apical node " << i << " is "<<cellNodeInfoVecs.activeLocXApical[i] << " and " << cellNodeInfoVecs.activeLocYApical[i] << endl ; //} // std::cout<<"AHAHA 5"<<std::endl; thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalApicalNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeLocXApical.begin(), cellNodeInfoVecs.activeLocYApical.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())), thrust::equal_to<uint>(), CVec2Add()); // up to here apicalLocX and apicalLocY are the summation. We divide them if at lease one apical node exist. // 0,0 location for apical node indicates that there is no apical node. 
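// [Added illustration of the Thrust pattern used here; not part of the original code] The apical
// location is built in two passes: reduce_by_key sums the (x, y) coordinates of apical nodes that
// share a cell rank, and the transform that follows divides each sum by that cell's apical node
// count (ApicalLocCal). A minimal standalone version of the same sum-then-divide idiom, assuming
// only the Thrust headers this file already relies on:
//
//   // toy example: 5 apical nodes belonging to 2 cells (ranks already grouped by cell)
//   int    k[] = {0, 0, 0, 1, 1};
//   double x[] = {1.0, 2.0, 3.0, 10.0, 12.0};
//   thrust::device_vector<int>    keys(k, k + 5);
//   thrust::device_vector<double> vals(x, x + 5);
//   thrust::device_vector<int>    outKey(2);
//   thrust::device_vector<double> sumX(2);
//   thrust::reduce_by_key(keys.begin(), keys.end(), vals.begin(),
//                         outKey.begin(), sumX.begin());   // sumX = {6.0, 22.0}
//   // dividing by the per-cell apical node counts {3, 2} gives the averages {2.0, 11.0};
//   // ApicalLocCal performs this division for x and y together in the transform below.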
/* // I comment this section since for now all the cells have apical node // // special consideration for the cells with no apical nodes int NumCellsWithApicalNode=0 ; for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { if (cellInfoVecs.apicalNodeCount[i]!=0) { NumCellsWithApicalNode=NumCellsWithApicalNode +1; } } */ //finish commenting speical consideration for the cells with no apical node //simply these two are equal int NumCellsWithApicalNode=allocPara_m.currentActiveCellCount ; // // std::cout<<"AHAHA 6"<<std::endl; //cout << "num of cells with apical node is " << NumCellsWithApicalNode << endl ; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin(), cellInfoVecs.cellRanksTmpStorage.begin() )), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin(), cellInfoVecs.cellRanksTmpStorage.begin() )) + NumCellsWithApicalNode, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocY.begin())), ApicalLocCal(apicalNodeCountAddr)); /* I comment this section since for this simulation all the cells have apical node // start special consideration for the cells which have no apical node //reargment to also include the cell which have not apical cells and assign the location for them as 0,0 for (int i=0 ; i<allocPara_m.currentActiveCellCount-1 ; i++) { // if the cell with 0 apical node is at the end, we are fine. if (cellInfoVecs.apicalNodeCount[i]==0) { cout << " I am inside complicated loop" << endl ; for (int j=allocPara_m.currentActiveCellCount-2 ; j>=i ; j--) { cellInfoVecs.apicalLocX[j+1]=cellInfoVecs.apicalLocX[j] ; cellInfoVecs.apicalLocY[j+1]=cellInfoVecs.apicalLocY[j] ; } cellInfoVecs.apicalLocX[i]=0 ; cellInfoVecs.apicalLocY[i]=0 ; } } if (cellInfoVecs.apicalNodeCount[allocPara_m.currentActiveCellCount-1]==0) { // if the cell with 0 apical node is at the end, no rearrngment is required cellInfoVecs.apicalLocX[allocPara_m.currentActiveCellCount-1]=0 ; cellInfoVecs.apicalLocY[allocPara_m.currentActiveCellCount-1]=0 ; } // finish special consideration for the cells that have not apical nodes */ // if (timeRatio == timeRatio_Crit_Division){ // std::cout<<"totalNodeCountForActiveCells = "<<totalNodeCountForActiveCells<<std::endl; // std::cout<<"nodes->getInfoVecs().nodeIsApicalMem = "<<nodes->getInfoVecs().nodeIsApicalMem.size()<<std::endl; // for (int i = 0; i < nodes->getInfoVecs().nodeIsApicalMem.size(); i++){ // if (i == 0){ // std::cout<<"i = "<<i<<", "<<nodes->getInfoVecs().nodeIsApicalMem[i]<<std::endl; // } // else if (i == nodes->getInfoVecs().nodeIsApicalMem.size()-1){ // std::cout<<"i = "<<i<<", "<<nodes->getInfoVecs().nodeIsApicalMem[i]<<std::endl; // } // else{continue;} // } // std::cout<<"cellInfoVecs.cellRanksTmpStorage = "<<cellInfoVecs.cellRanksTmpStorage.size()<<std::endl; // for (int i = 0; i < cellInfoVecs.cellRanksTmpStorage.size(); i++){ // if (i == 0){ // std::cout<<"i = "<<i<<", "<<cellInfoVecs.cellRanksTmpStorage[i]<<std::endl; // } // else if (i == cellInfoVecs.cellRanksTmpStorage.size()-1){ // std::cout<<"i = "<<i<<", "<<cellInfoVecs.cellRanksTmpStorage[i]<<std::endl; // } // else{continue;} // } // std::cout<<"cellInfoVecs.apicalNodeCount = "<<cellInfoVecs.apicalNodeCount.size()<<std::endl; // for (int i = 0; i < cellInfoVecs.apicalNodeCount.size(); i++){ // if (i == 0){ // std::cout<<"i = "<<i<<", "<<cellInfoVecs.apicalNodeCount[i]<<std::endl; // } // else if (i == 
cellInfoVecs.apicalNodeCount.size()-1){
		// 	std::cout<<"i = "<<i<<", "<<cellInfoVecs.apicalNodeCount[i]<<std::endl;
		// }
		// else{continue;}
	// }
	// }
}

// This function is not currently active. It is useful when the level of growth needs to be related to the nucleus location.
void SceCells::computeNucleusLoc() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin()))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.nucleusLocX.begin(),
							cellInfoVecs.nucleusLocY.begin())),
			CalNucleusLoc());

	//for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) {
	//	cout << "for cell rank "<< i << " Cell progress is " << cellInfoVecs.growthProgress[i] << endl ;
	//	cout << "for cell rank "<< i << " Nucleus location in X direction is " << cellInfoVecs.nucleusLocX[i] <<" in Y direction is " << cellInfoVecs.nucleusLocY[i] << endl ;
	//	cout << "for cell rank "<< i << " apical location in X direction is " << cellInfoVecs.apicalLocX[i] <<" in Y direction is " << cellInfoVecs.apicalLocY[i] << endl ;
	//	cout << "for cell rank "<< i << " center location in X direction is " << cellInfoVecs.centerCoordX[i] <<" in Y direction is " << cellInfoVecs.centerCoordY[i] << endl ;
	//}
}

void SceCells::computeIndividualCellHeight(double distFromNucleus_normalMax1,
		double distFromNucleus_normalMax2, double distFromNucleus_normalMax3,
		double distFromNucleus_normalMax_apical1,
		double distFromNucleus_normalMax_apical2,
		double distFromNucleus_normalMax_apical3) {
	// double* individualCellHeightPreMitotic = thrust::raw_pointer_cast(&(cellInfoVecs.individualCellHeight[0]));
	thrust::counting_iterator<uint> iBegin(0);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin,
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin(),
							cellInfoVecs.basalLocX.begin(),
							cellInfoVecs.basalLocY.begin(),
							cellInfoVecs.isEnteringMitotic.begin(),
							cellInfoVecs.individualCellHeight.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin,
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin(),
							cellInfoVecs.basalLocX.begin(),
							cellInfoVecs.basalLocY.begin(),
							cellInfoVecs.isEnteringMitotic.begin(),
							cellInfoVecs.individualCellHeight.begin()))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.individualCellHeight.begin(),
							cellInfoVecs.distFromNucleus_normal.begin(),
							cellInfoVecs.distFromNucleus_normal_apical.begin())),
			CalCellHeight(distFromNucleus_normalMax1, distFromNucleus_normalMax2,
					distFromNucleus_normalMax3,
					distFromNucleus_normalMax_apical1,
					distFromNucleus_normalMax_apical2,
					distFromNucleus_normalMax_apical3));
}

void SceCells::computeIndividualCellHeight_Ver2() {
	// double* individualCellHeightPreMitotic = thrust::raw_pointer_cast(&(cellInfoVecs.individualCellHeight[0]));
	thrust::counting_iterator<uint> iBegin(0);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin,
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin(),
							cellInfoVecs.basalLocX.begin(),
							cellInfoVecs.basalLocY.begin(),
							cellInfoVecs.isEnteringMitotic.begin(),
							cellInfoVecs.individualCellHeight.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin,
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin(),
							cellInfoVecs.basalLocX.begin(),
							cellInfoVecs.basalLocY.begin(),
							cellInfoVecs.isEnteringMitotic.begin(),
							cellInfoVecs.individualCellHeight.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.individualCellHeight.begin(),
			CalCellHeight_Ver2());
}

void SceCells::computeNucleusDesireLoc() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.nucleusLocPercent.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.nucleusLocPercent.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin()))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.nucleusDesireLocX.begin(),
							cellInfoVecs.nucleusDesireLocY.begin(),
							cellInfoVecs.nucDesireDistApical.begin())),
			CalNucleusDesireLoc());
}

void SceCells::computeCellCenterPerturbedLoc() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.nucleusLocPercent.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.nucleusLocPercent.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.apicalLocX.begin(),
							cellInfoVecs.apicalLocY.begin()))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.cellCenterPerturbedLocLocX.begin(),
							cellInfoVecs.cellCenterPerturbedLocLocY.begin(),
							cellInfoVecs.cellCenterDesireDistApical.begin())),
			CalCellCenterPerturbedLoc());
}

void SceCells::computeNucleusIniLocPercent() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.InternalAvgY.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.apicalLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.InternalAvgY.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.apicalLocY.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.nucleusLocPercent.begin(),
			CalNucleusIniLocPercent());

	/*
	for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) {
		cout << "for cell rank "<< i << " nucleus cell percent is " << cellInfoVecs.nucleusLocPercent[i] << endl ;
		cout << "for cell rank "<< i << " cell center in Y direction is " << cellInfoVecs.centerCoordY[i] << endl ;
		cout << "for cell rank "<< i << " apical location in Y direction is " << cellInfoVecs.apicalLocY[i] << endl ;
		cout << "for cell rank "<< i << " Internal average in Y direction is " << cellInfoVecs.InternalAvgY[i] << endl ;
	}
	*/
}

// This function is not currently active. It was used when 1) internal nodes were used to represent the nucleus and
// 2) we wanted to force the internal nodes to be at the desired location. The problem with this method is that it
// creates a net unphysical force on the cell.
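// ---------------------------------------------------------------------------
// Illustrative sketch only (never called anywhere): a minimal, self-contained
// example of the zip-iterator + thrust::transform idiom that the per-cell
// helpers above (computeNucleusLoc, computeNucleusDesireLoc, ...) are built on.
// The interpolation rule inside the functor is an assumption made for this
// sketch; the real CalNucleusDesireLoc functor, defined elsewhere in the code
// base, may use a different formula. The duplicated thrust includes are
// harmless because of the header guards.
// ---------------------------------------------------------------------------
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

namespace sce_sketch {

struct LerpNucleusLoc {
	// (percent, centerX, centerY, apicalX, apicalY) -> (nucleusX, nucleusY)
	__host__ __device__
	thrust::tuple<double, double> operator()(
			const thrust::tuple<double, double, double, double, double>& in) const {
		double p  = thrust::get<0>(in);
		double cx = thrust::get<1>(in);
		double cy = thrust::get<2>(in);
		double ax = thrust::get<3>(in);
		double ay = thrust::get<4>(in);
		// Assumed rule: place the nucleus a fraction p of the way from the
		// cell center toward the apical point.
		return thrust::make_tuple(cx + p * (ax - cx), cy + p * (ay - cy));
	}
};

// One zip range per cell on the input side and one on the output side; a
// single thrust::transform then updates every cell in parallel on the device.
inline void lerpNucleusLocDemo(const thrust::device_vector<double>& percent,
		const thrust::device_vector<double>& centerX,
		const thrust::device_vector<double>& centerY,
		const thrust::device_vector<double>& apicalX,
		const thrust::device_vector<double>& apicalY,
		thrust::device_vector<double>& nucX,
		thrust::device_vector<double>& nucY) {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(percent.begin(), centerX.begin(),
							centerY.begin(), apicalX.begin(), apicalY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(percent.end(), centerX.end(),
							centerY.end(), apicalX.end(), apicalY.end())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nucX.begin(), nucY.begin())),
			LerpNucleusLoc());
}

} // namespace sce_sketch

// The next function, updateInternalAvgPosByNucleusLoc_M(), is the inactive
// routine that the comment above refers to.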
void SceCells::updateInternalAvgPosByNucleusLoc_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
	thrust::counting_iterator<uint> iBegin(0);

	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(),
							cellInfoVecs.InternalAvgY.begin(),
							cellInfoVecs.nucleusLocX.begin(),
							cellInfoVecs.nucleusLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.InternalAvgX.begin(),
							cellInfoVecs.InternalAvgY.begin(),
							cellInfoVecs.nucleusLocX.begin(),
							cellInfoVecs.nucleusLocY.begin()))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.tmpShiftVecX.begin(),
							cellInfoVecs.tmpShiftVecY.begin())),
			CalShiftVec());

	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.tmpShiftVecX.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.tmpShiftVecY.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.tmpShiftVecX.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.tmpShiftVecY.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin()))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			AdjustInternalNodesLoc(maxMemNodePerCell));
}

// void SceCells::growAtRandom_M(double dt) {
// 	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
// 			* allocPara_m.maxAllNodePerCell;
// 	cout << "dt inside growAtRandom_M is: "<< dt << endl ;
// 	randomizeGrowth_M();
// 	updateGrowthProgress_M();
// 	decideIsScheduleToGrow_M();
// 	//computeCellTargetLength_M();
// 	//computeDistToCellCenter_M();
// 	//findMinAndMaxDistToCenter_M();
// 	//computeLenDiffExpCur_M();
// 	//stretchCellGivenLenDiff_M();
// 	addPointIfScheduledToGrow_M();
// 	//decideIsScheduleToShrink_M();// AAMIRI May5
// 	//delPointIfScheduledToGrow_M();//AAMIRI - commented out on June20
// 	int currentActiveCellCount = allocPara_m.currentActiveCellCount ;
// 	thrust::device_vector<double>::iterator minCellProgress_Itr=thrust::min_element(cellInfoVecs.growthProgress.begin(),
// 			cellInfoVecs.growthProgress.begin()+ currentActiveCellCount) ;
// 	double minCell_Progress= *minCellProgress_Itr ;
// 	if (minCell_Progress > 0 ) { // so as not to interfere with initialization with negative progress; no cell should divide before every one is positive.
// adjustGrowthInfo_M(); // // } // } // void SceCells::growAtRandom_M(double dt) { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // // cout << "dt inside growAtRandom_M is: "<< dt << endl ; // // randomizeGrowth_M(); // updateGrowthProgress_M(); // // decideIsScheduleToGrow_M(); // // int currentActiveCellCount = allocPara_m.currentActiveCellCount ; // // thrust::device_vector<double>::iterator minCellProgress_Itr=thrust::min_element(cellInfoVecs.growthProgress.begin(), // // cellInfoVecs.growthProgress.begin()+ currentActiveCellCount) ; // // double minCell_Progress= *minCellProgress_Itr ; // // if (minCell_Progress > 0 ) { // to not intefer with initialization with negative progress and no cell should divide before every one is positive. // // adjustGrowthInfo_M(); // // // } // } void SceCells::growAtRandom_M(double growthProgressSpeed) { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; updateGrowthProgress_M(growthProgressSpeed); } void SceCells::growAtRandom_M_Ver2(double growthProgressSpeed, double mitoticThreshold) { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; // cellRankFront = thrust::raw_pointer_cast(&(nodes->getInfoVecs().nodeCellRankFront[0])); // cellRankBehind = thrust::raw_pointer_cast(&(nodes->getInfoVecs().nodeCellRankBehind[0])); updateGrowthProgress_M_Ver2(growthProgressSpeed, mitoticThreshold); } //Ali void SceCells::enterMitoticCheckForDivAxisCal(double mitoticThreshold) { bool isEnteringMitotic = decideIfAnyCellEnteringMitotic(mitoticThreshold) ; //A&A //A&A // if (isEnteringMitotic){ // std::cout<< "I am in EnteringMitotic"<< std::endl; // copyCellsEnterMitotic(); // // findHertwigAxis(); // findHertwigAxis_useBasalApicalLoc(); // } } void SceCells::divide2D_M(double volume_Increase_Target_Ratio, double timeRatio, double thresholdToIntroduceNewCell) { bool isDivisionPresent = decideIfGoingToDivide_M(volume_Increase_Target_Ratio); // std::cout<<"isDivisionPresent = "<<isDivisionPresent<<", number of cells undergoing division = "<<divAuxData.toBeDivideCount<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; if (isDivisionPresent == false) { // std::cout<<"No cell division is present!"<<std::endl; return; } uint seed = time(NULL); thrust::default_random_engine rng(seed); rng.discard(allocPara_m.currentActiveCellCount); thrust::uniform_real_distribution<double> distribution(0.0, 1.0); thrust::uniform_real_distribution<double> distribution_half(0.0, 0.5); // thrust::uniform_real_distribution<double> distribution_half(0.0, 0.25); // if (timeRatio 
>= 0.5){ // thrust::uniform_real_distribution<double> distribution(2.0, 4.0); // } double quiescence1, quiescence1_half; double quiescence2, quiescence2_half; double isNewCellIntroduced; // double thresholdToIntroduceNewCell = 0.3;//0.15; std::cout<<"Chance of new cell introduction = "<<thresholdToIntroduceNewCell<<std::endl; for (int i = 0; i < cellInfoVecs.isDividing.size(); i++){ if (cellInfoVecs.isDividing[i] == true){ if (cellInfoVecs.cellAreaVec[i] < (65*volume_Increase_Target_Ratio*0.9)){ std::cout<<"Cell Division occurs without reaching at least 90% of the target cell volume for division! Need to readjust parameters."<<std::endl; } } } isNewCellIntroduced = distribution(rng);//(distribution(rng) - 1.0)/2.0; // if (1 > 0){ if (isNewCellIntroduced < thresholdToIntroduceNewCell){ quiescence1 = -1.0*distribution(rng); // quiescence1_half = -1.0*distribution_half(rng); quiescence1_half = 1.0*distribution_half(rng); quiescence2 = -1.0*distribution(rng); // quiescence2_half = -1.0*distribution_half(rng); quiescence2_half = 1.0*distribution_half(rng); // std::cout<<"cellArea[10] = "<<cellInfoVecs.cellAreaVec[10]<<std::endl; // std::cout<<"cellArea[19] = "<<cellInfoVecs.cellAreaVec[19]<<std::endl; // std::cout<<"cellArea[28] = "<<cellInfoVecs.cellAreaVec[28]<<std::endl; copyCellsEnterDivision(); // std::cout<<"ERROR HERE 0?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; findHertwigAxis_useBasalApicalLoc(); //aniDebug = true; // std::cout<<"ERROR HERE 1?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; copyCellsPreDivision_M(); // std::cout<<"ERROR HERE 2?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = 
"<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; createTwoNewCellArr_M(); // main function which plays with position of internal nodes and membrane new created nodes. // std::cout<<"ERROR HERE 3?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; copyFirstCellArr_M(quiescence1, quiescence1_half); // copy the first cell information to GPU level and initilize values such as cell prgoress and cell rank .. // std::cout<<"ERROR HERE 4?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; copySecondCellArr_M(quiescence2, quiescence2_half);// copy the second cell information to GPU level and initilize values such as cell prgoress and cell rank .. 
// std::cout<<"ERROR HERE 5?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; updateActiveCellCount_M(); // std::cout<<"ERROR HERE 6?"<<std::endl; // std::cout<<"isDividing["<<10<<"] = "<<cellInfoVecs.isDividing[10]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[10]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[10]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[10]<<std::endl; // std::cout<<"isDividing["<<19<<"] = "<<cellInfoVecs.isDividing[19]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[19]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[19]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[19]<<std::endl; // std::cout<<"isDividing["<<28<<"] = "<<cellInfoVecs.isDividing[28]<<", isEnteringMitotic = "<<cellInfoVecs.isEnteringMitotic[28]<<", cellAreaGrowthProgress = "<<cellInfoVecs.cellAreaGrowthProgress[28]<<", cellAreaGrowthProgressNonMitotic = "<<cellInfoVecs.cellAreaGrowthProgressNonMitotic[28]<<std::endl; markIsDivideFalse_M(); // std::cout<<"ERROR HERE 7?"<<std::endl; //divDebug(); //Ali nodes->isMemNodeTypeAssigned_postCellDivision = false; cellInfoVecs.isPostDivision = true; // for (int k = 0; k < nodes->getInfoVecs().nodeCellRankFront.size(); k++){ uint cellRank_print = 0; for (int k = 0; k < allocPara_m.currentActiveCellCount; k++){ if (k == 0){ std::cout<<"Printing out order of cells in the tissue post growth"<<std::endl; std::cout<<k<<" "<<nodes->getInfoVecs().nodeCellRankFront[cellRank_print]<<" "; } // if (k == allocPara_m.currentActiveCellCount){ // std::cout<<nodes->getInfoVecs().nodeCellRankFront[k]<<" "<<std::endl;; // } else{ std::cout<<nodes->getInfoVecs().nodeCellRankFront[nodes->getInfoVecs().nodeCellRankFront[cellRank_print]]<<" "; } cellRank_print = nodes->getInfoVecs().nodeCellRankFront[cellRank_print]; } // for (int k = 0; k < nodes->getInfoVecs().nodeCellRankFront.size(); k++){ for (int k = 0; k < allocPara_m.currentActiveCellCount; k++){ if (k == 0){ std::cout<<"Printing out associated multip of cells in the tissue post growth"<<std::endl; std::cout<<nodes->getInfoVecs().contractActomyo_multip[k*allocPara_m.maxAllNodePerCell]<<" "<<nodes->getInfoVecs().contractActomyo_multip[nodes->getInfoVecs().nodeCellRankFront[k]*allocPara_m.maxAllNodePerCell]<<" "; } // if (k == allocPara_m.currentActiveCellCount){ // std::cout<<nodes->getInfoVecs().nodeCellRankFront[k]<<" "<<std::endl;; // } else{ // std::cout<<nodes->getInfoVecs().nodeCellRankFront[nodes->getInfoVecs().nodeCellRankFront[k-1]]<<" "; std::cout<< 
nodes->getInfoVecs().contractActomyo_multip[nodes->getInfoVecs().nodeCellRankFront[k-1]*allocPara_m.maxAllNodePerCell]<<" "; } } for (int k = 0; k < allocPara_m.currentActiveCellCount; k++){ if (k == 0){ std::cout<<"Printing out number of cell daughter procued"<<std::endl; } if (k == allocPara_m.currentActiveCellCount-1){ std::cout<<"["<<k<<", "<<cellInfoVecs.daughterCellProduced[k]<<"] "<<std::endl;; } else{ std::cout<<"["<<k<<", "<<cellInfoVecs.daughterCellProduced[k]<<"] "; } } } else{ double quiescence3 = -1.0*distribution(rng); // double quiescence3_half = -1.0*distribution_half(rng); double quiescence3_half = 1.0*distribution_half(rng); copyCellsEnterDivision(); uint cellRank = divAuxData.tmpCellRank_M[0]; double leftOrRight = cellInfoVecs.centerCoordX[31]*cellInfoVecs.centerCoordY[cellRank] - cellInfoVecs.centerCoordY[31]*cellInfoVecs.centerCoordX[cellRank]; if (leftOrRight >= 0){ cellInfoVecs.growthProgress[cellRank] = quiescence3_half;//quiescence3*0.5; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } else{ cellInfoVecs.growthProgress[cellRank] = quiescence3; std::cout<<"leftOrRight : "<<leftOrRight<<", quiescence : "<<cellInfoVecs.growthProgress[cellRank]<<std::endl; } for (int i = cellRank*allocPara_m.maxAllNodePerCell; i < (cellRank+1)*allocPara_m.maxAllNodePerCell; i++){ nodes->getInfoVecs().quiescencePerNode[i] = cellInfoVecs.growthProgress[cellRank];//quiescence3; } // cellInfoVecs.growthProgress[cellRank] = quiescence3; cellInfoVecs.cellAreaGrowthProgress[cellRank] = 0; cellInfoVecs.cellAreaGrowthProgressNonMitotic[cellRank] = 0; cellInfoVecs.isEnteringMitotic[cellRank] = false; cellInfoVecs.isDividing[cellRank] = false; cellInfoVecs.distFromBasalLoc[cellRank] = 0.0; cellInfoVecs.distFromApicalLoc[cellRank] = 0.0; cellInfoVecs.daughterCellProduced[cellRank] += 1; for (int k = 0; k < allocPara_m.currentActiveCellCount; k++){ if (k == 0){ std::cout<<"Printing out number of cell daughter procued"<<std::endl; } if (k == allocPara_m.currentActiveCellCount){ std::cout<<"["<<k<<", "<<cellInfoVecs.daughterCellProduced[k]<<" "<<std::endl;; } else{ std::cout<<"["<<k<<", "<<cellInfoVecs.daughterCellProduced[k]<<"] "; } } int startIndex = cellRank*allocPara_m.maxAllNodePerCell + allocPara_m.maxMembrNodePerCell; int numOfNucleus = cellInfoVecs.activeIntnlNodeCounts[cellRank]; std::cout<<"Current number of nucleus labeled as 'active' = "<<numOfNucleus<<", before attempting to reduce the number due to out-of-plane cell division"<<std::endl; // if (cellInfoVecs.activeIntnlNodeCounts[cellRank] < 48){ // std::cout<<"Reducing nucleus size in out-of-plane cell division. But the current nucleus account is less than the default value (default = 24) ! 
SOMETHING IS WRONG!"<<std::endl; // } double activeNucNodeX = 0.0, activeNucNodeY = 0.0; cellInfoVecs.activeIntnlNodeCounts[cellRank] = 0; for (int i = 0; i < numOfNucleus; i++){ if (i < 24){ nodes->getInfoVecs().nodeIsActive[i + startIndex] = true; activeNucNodeX += nodes->getInfoVecs().nodeLocX[i + startIndex]; activeNucNodeY += nodes->getInfoVecs().nodeLocY[i + startIndex]; // std::cout<<"Saved nuc["<<i<<"] : "<<nodes->getInfoVecs().nodeLocX[i + startIndex]<<", "<<nodes->getInfoVecs().nodeLocY[i + startIndex]<<std::endl; cellInfoVecs.activeIntnlNodeCounts[cellRank] += 1; } else{ nodes->getInfoVecs().nodeIsActive[i + startIndex] = false; nodes->getInfoVecs().nodeLocX[i + startIndex] = 0.0; nodes->getInfoVecs().nodeLocY[i + startIndex] = 0.0; } } activeNucNodeX = activeNucNodeX/24.0; activeNucNodeY = activeNucNodeY/24.0; // std::cout<<"activeNucNodeX, Y: "<<activeNucNodeX<<", "<<activeNucNodeY<<std::endl; for (int i = 0; i < 24; i++){ if (nodes->getInfoVecs().nodeIsActive[i + startIndex] == true){ // std::cout<<"To be moved nuc["<<i<<"] : "<<nodes->getInfoVecs().nodeLocX[i + startIndex]<<", "<<nodes->getInfoVecs().nodeLocY[i + startIndex]<<std::endl; // std::cout<<"shifted by X, Y ["<<i<<"]: "<<(activeNucNodeX - nodes->getInfoVecs().nodeLocX[i + startIndex])*0.9<<", "<<(activeNucNodeY - nodes->getInfoVecs().nodeLocY[i + startIndex])*0.9<<std::endl; double distFromNucToNucCenter = sqrt((-activeNucNodeX + nodes->getInfoVecs().nodeLocX[i + startIndex])*(-activeNucNodeX + nodes->getInfoVecs().nodeLocX[i + startIndex]) + (-activeNucNodeY + nodes->getInfoVecs().nodeLocY[i + startIndex])*(-activeNucNodeY + nodes->getInfoVecs().nodeLocY[i + startIndex])); nodes->getInfoVecs().nodeLocX[i + startIndex] = (-activeNucNodeX + nodes->getInfoVecs().nodeLocX[i + startIndex])/distFromNucToNucCenter + activeNucNodeX;//nodes->getInfoVecs().nodeLocX[i + startIndex]; nodes->getInfoVecs().nodeLocY[i + startIndex] = (-activeNucNodeY + nodes->getInfoVecs().nodeLocY[i + startIndex])/distFromNucToNucCenter + activeNucNodeY;//nodes->getInfoVecs().nodeLocY[i + startIndex]; // std::cout<<"Resulting nuc["<<i<<"] : "<<nodes->getInfoVecs().nodeLocX[i + startIndex]<<", "<<nodes->getInfoVecs().nodeLocY[i + startIndex]<<std::endl; } } // std::cout<<"Current number of nucleus labeled as 'active' = "<<numOfNucleus<<", after reducing the number for out-of-plane cell division"<<std::endl; } } void SceCells::eCMCellInteraction(bool cellPolar,bool subCellPolar, bool isInitPhase, double timeRatio, double timeRatio_Crit_ECM, double timeRatio_Crit_Division, int relaxCount, double mitoticThreshold) { int totalNodeCountForActiveCellsECM = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; // eCMPointerCells->ApplyECMConstrain(allocPara_m.currentActiveCellCount,totalNodeCountForActiveCellsECM,curTime,dt,Damp_Coef,cellPolar,subCellPolar,isInitPhase);//, timeRatio, timeRatio_Crit_ECM, timeRatio_Crit_Division, relaxCount); eCMPointerCells->ApplyECMConstrain(allocPara_m.currentActiveCellCount,totalNodeCountForActiveCellsECM,curTime,dt,Damp_Coef,cellPolar,subCellPolar,isInitPhase, mitoticThreshold); } void SceCells::distributeCellGrowthProgress_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, 
DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara_m.bdryNodeCount); if (curTime <= InitTimeStage+dt)//AAMIRI /A & A thrust::copy( cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.end(), cellInfoVecs.lastCheckPoint.begin() ); } void SceCells::allComponentsMove_M() { //moveNodes_M(); //Ali moveNodes_BC_M(); //Ali } void SceCells::randomizeGrowth_M() { thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; double minY_Tisu= *MinY_Itr ; double maxY_Tisu= *MaxY_Itr ; uint seed = time(NULL); thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.eCellTypeV2.begin(), cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.eCellTypeV2.begin(), cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin())), RandomizeGrow_M(minY_Tisu,maxY_Tisu,growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, seed)); for (int i=0 ; i<1 ; i++) { cout << "cell growth speed for rank " <<i << " is " << cellInfoVecs.growthSpeed [i] << endl ; } cout << "the min growth speed is " << growthAuxData.randomGrowthSpeedMin << endl ; cout << "the max growth speed is " << growthAuxData.randomGrowthSpeedMax << endl ; } // void SceCells::updateGrowthProgress_M() { // thrust::counting_iterator<uint> iBegin(0); // thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); // thrust::copy(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgress.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.growthProgressOld.begin()); // // thrust::transform(cellInfoVecs.growthSpeed.begin(), // // cellInfoVecs.growthSpeed.begin() // // + allocPara_m.currentActiveCellCount, // // cellInfoVecs.growthProgress.begin(), // // cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthSpeed.begin(), // iBegin)), // thrust::make_zip_iterator( // thrust::make_tuple( // cellInfoVecs.growthProgress.begin()+ allocPara_m.currentActiveCellCount, // cellInfoVecs.growthSpeed.begin() + allocPara_m.currentActiveCellCount, // iEnd)), // cellInfoVecs.growthProgress.begin(), // progress_BCImp(dt)); // } void SceCells::updateGrowthProgress_M(double growthProgressSpeed) { thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); int* 
daughterCellProduced = thrust::raw_pointer_cast( &(cellInfoVecs.daughterCellProduced[0])); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.growthProgressOld.begin()); // thrust::transform(cellInfoVecs.growthSpeed.begin(), // cellInfoVecs.growthSpeed.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthSpeed.begin(), iBegin)), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.growthProgress.begin()+ allocPara_m.currentActiveCellCount, cellInfoVecs.growthSpeed.begin() + allocPara_m.currentActiveCellCount, iEnd)), cellInfoVecs.growthProgress.begin(), progress_BCImp(growthProgressSpeed, daughterCellProduced)); } void SceCells::updateGrowthProgress_M_Ver2(double growthProgressSpeed, double mitoticThreshold) { thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); int* cellRankFront = thrust::raw_pointer_cast(&(nodes->getInfoVecs().nodeCellRankFront[0])); int* cellRankBehind = thrust::raw_pointer_cast(&(nodes->getInfoVecs().nodeCellRankBehind[0])); double* currentGrowthProgress = thrust::raw_pointer_cast(&cellInfoVecs.growthProgress[0]); int* daughterCellProduced = thrust::raw_pointer_cast( &(cellInfoVecs.daughterCellProduced[0])); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.growthProgressOld.begin()); // thrust::transform(cellInfoVecs.growthSpeed.begin(), // cellInfoVecs.growthSpeed.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthSpeed.begin(), iBegin)), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.growthProgress.begin()+ allocPara_m.currentActiveCellCount, cellInfoVecs.growthSpeed.begin() + allocPara_m.currentActiveCellCount, iEnd)), cellInfoVecs.growthProgress.begin(), progress_BCImp_Ver2(growthProgressSpeed, daughterCellProduced, cellRankFront, cellRankBehind, currentGrowthProgress, mitoticThreshold)); } void SceCells::decideIsScheduleToGrow_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } //AAMIRI May5 void SceCells::decideIsScheduleToShrink_M() { double laserCenterX = 26.0; double laserCenterY = 25.0; double laserRadius = 4.0; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isScheduledToShrink.begin())), thrust::make_zip_iterator( thrust::make_tuple(iEnd, cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount, 
cellInfoVecs.isScheduledToShrink.begin()+allocPara_m.currentActiveCellCount)), cellInfoVecs.isScheduledToShrink.begin(), isDelOp(laserCenterX, laserCenterY, laserRadius)); } void SceCells::computeCellTargetLength_M() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } void SceCells::computeDistToCellCenter_M() { thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(totalNodeCountForActiveCells); uint endIndx = allocPara_m.bdryNodeCount + totalNodeCountForActiveCells; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeLocX.begin() + endIndx, nodes->getInfoVecs().nodeLocY.begin() + endIndx, nodes->getInfoVecs().nodeIsActive.begin() + endIndx)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } void SceCells::findMinAndMaxDistToCenter_M() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. 
thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } void SceCells::computeLenDiffExpCur_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } void SceCells::stretchCellGivenLenDiff_M() { uint count = allocPara_m.maxAllNodePerCell; uint bdry = allocPara_m.bdryNodeCount; uint actCount = totalNodeCountForActiveCells; uint all = bdry + actCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(actCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), nodes->getInfoVecs().nodeVelX.begin() + bdry, nodes->getInfoVecs().nodeVelY.begin() + bdry, make_transform_iterator(iBegin, ModuloFunctor(count)))), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin() + actCount, make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), nodes->getInfoVecs().nodeVelX.begin() + all, nodes->getInfoVecs().nodeVelY.begin() + all, make_transform_iterator(iEnd, ModuloFunctor(count)))), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + bdry, nodes->getInfoVecs().nodeVelY.begin() + bdry)), ApplyStretchForce_M(bioPara.elongationCoefficient, allocPara_m.maxMembrNodePerCell)); } void SceCells::addPointIfScheduledToGrow_M() { uint seed = time(NULL); uint activeCellCount = allocPara_m.currentActiveCellCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(activeCellCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgY.begin(), iBegin, cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.isScheduledToGrow.begin() + activeCellCount, cellInfoVecs.activeIntnlNodeCounts.begin() + activeCellCount, cellInfoVecs.InternalAvgX.begin() + activeCellCount, cellInfoVecs.InternalAvgY.begin() + activeCellCount, iEnd, cellInfoVecs.lastCheckPoint.begin() + activeCellCount)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lastCheckPoint.begin(), 
cellInfoVecs.activeIntnlNodeCounts.begin())), AddPtOp_M(seed, miscPara.addNodeDistance, miscPara.growThreshold, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.nodeIsActiveAddress)); } //AAMIRI void SceCells::delPointIfScheduledToGrow_M() { uint seed = time(NULL); uint activeCellCount = allocPara_m.currentActiveCellCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(activeCellCount); int timeStep = curTime/dt; if (curTime>70000.0 && curTime<70000.1){ decideIsScheduleToShrink_M();// AAMIRI } if (curTime > 70000.0) thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToShrink.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), iBegin, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.isCellActive.begin(), cellInfoVecs.growthSpeed.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.isScheduledToShrink.begin() + activeCellCount, cellInfoVecs.activeIntnlNodeCounts.begin() + activeCellCount, cellInfoVecs.centerCoordX.begin() + activeCellCount, cellInfoVecs.centerCoordY.begin() + activeCellCount, iEnd, cellInfoVecs.activeMembrNodeCounts.begin() + activeCellCount, cellInfoVecs.isCellActive.begin() + activeCellCount, cellInfoVecs.growthSpeed.begin() + activeCellCount)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.isCellActive.begin(), cellInfoVecs.growthSpeed.begin())), DelPtOp_M(seed, timeStep, growthAuxData.adhIndxAddr, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.nodeIsActiveAddress)); } bool SceCells::decideIfGoingToDivide_M(double volume_Increase_Target_Ratio) { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.cellAreaVec.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.cellAreaVec.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), CompuIsDivide_M(65.0*volume_Increase_Target_Ratio*0.9)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 1){ int howManyCellsAreDividing = 0; for (int i = 0; i < cellInfoVecs.isDividing.size(); i++){ if (cellInfoVecs.isDividing[i] == true){ howManyCellsAreDividing += 1; if (howManyCellsAreDividing > 1){ cellInfoVecs.isDividing[i] = false; } } } divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); std::cout<<"More than one cell is undergoing division. 
The number is trimmed down to "<<divAuxData.toBeDivideCount<<" to avoid model instability!"<<std::endl; } if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } //A&A // bool SceCells::decideIfAnyCellEnteringMitotic() { // // double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // // - growthAuxData.prolifDecay // // * (growthAuxData.grthProgrEndCPU // // - growthAuxData.grthPrgrCriVal_M_Ori); // double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgressOld.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.growthProgress.begin(), // cellInfoVecs.growthProgressOld.begin())) // + allocPara_m.currentActiveCellCount, // cellInfoVecs.isEnteringMitotic.begin(), // CompuIsEnteringMitotic_M(grthPrgrCriVal_M)); // //CompuIsEnteringMitotic_M(0.98)); // Ali for cross section modeling // // sum all bool values which indicate whether the cell is going to divide. // // toBeDivideCount is the total number of cells going to divide. // // divAuxData.toEnterMitoticCount = thrust::reduce(cellInfoVecs.isEnteringMitotic.begin(), // // cellInfoVecs.isEnteringMitotic.begin() // // + allocPara_m.currentActiveCellCount, (uint) (0)); // // if (cycle == 0){ // std::fill(cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.isEnteringMitotic.end(),false); // cellInfoVecs.isEnteringMitotic[divAuxData.cellRank_division] = true; // // } // // else if (cycle == 1){ // // std::fill(cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.isEnteringMitotic.end(),false); // // cellInfoVecs.isEnteringMitotic[divAuxData.cellRank_division] = true; // // } // divAuxData.toEnterMitoticCount = 1; // if (divAuxData.toEnterMitoticCount > 0) { // return true; // } else { // return false; // } // } bool SceCells::decideIfAnyCellEnteringMitotic(double grthPrgrCriVal_M) { // double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); // double grthPrgrCriVal_M = 0.8973;//divAuxData.targetCellDividingArea; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgressOld.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgressOld.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isEnteringMitotic.begin(), CompuIsEnteringMitotic_M(grthPrgrCriVal_M)); //CompuIsEnteringMitotic_M(0.98)); // Ali for cross section modeling // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. 
divAuxData.toEnterMitoticCount = thrust::reduce(cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.isEnteringMitotic.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toEnterMitoticCount > 0) { return true; } else { return false; } } //AAMIRI /* bool SceCells::decideIfGoingToRemove_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isRemoving.begin(), CompuIsRemoving_M(allocPara_m.maxIntnlNodePerCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toBeRemovingCount = thrust::reduce(cellInfoVecs.isRemoving.begin(), cellInfoVecs.isRemoving.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeRemovingCount > 0) { return true; } else { return false; } } */ AniRawData SceCells::obtainAniRawData(AnimationCriteria& aniCri) { uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; AniRawData rawAniData; //cout << "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin())) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin()))); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; CVector tmpPos; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double aniVal; for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; for (uint i = 0; i < activeCellCount; 
i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = hostTmpVectorTenMag[index1]; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = hostTmpVectorTenMag[index2]; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } } for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + maxMemNodePerCell + k; if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = -1; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = -1; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.internalLinks.push_back(linkData); } } } } } return rawAniData; } AniRawData SceCells::obtainAniRawDataGivenCellColor(vector<double>& cellColors, AnimationCriteria& aniCri, vector<double>& cellsPerimeter) { //AliE uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; assert(cellColors.size() >= activeCellCount); assert(cellsPerimeter.size() == activeCellCount); //AliE AniRawData rawAniData; //cout 
<< "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::host_vector<double> hostTmpVectorF_MI_M_x(maxActiveNode);//AAMIRI //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_y(maxActiveNode);//AAMIRI //AliE //thrust::host_vector<double> hostTmpVectorF_MI_M_T(maxActiveNode); //AliE //thrust::host_vector<double> hostTmpVectorF_MI_M_N(maxActiveNode);//AliE thrust::host_vector<double> hostTmpVectorNodeCurvature(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorNodeActinLevel(maxActiveNode);//Ali thrust::host_vector<double> hostTmpVectorInterCellForceTangent(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorInterCellForceNormal(maxActiveNode);//AAMIRI thrust::host_vector<int> hostTmpContractPair(maxActiveNode); // thrust::host_vector<double> hostTmpNodeVelX(maxActiveNode); // thrust::host_vector<double> hostTmpNodeVelY(maxActiveNode); thrust::host_vector<double> hostTmpNodeContrApi(maxActiveNode); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeInterCellForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeInterCellForceNormal.begin())),//AAMIRI thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeInterCellForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeInterCellForceNormal.begin()))//AAMIRI + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostTmpVectorF_MI_M_x.begin(), hostTmpVectorF_MI_M_y.begin(),//AAMIRI hostTmpVectorNodeCurvature.begin(), //AAMIRI hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin(), hostTmpVectorInterCellForceTangent.begin(), hostTmpVectorInterCellForceNormal.begin())));//AAMIRI //Copy more than 10 elements is not allowed so, I separate it /* thrust::copy( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(), //Ali nodes->getInfoVecs().nodeF_MI_M_N.begin(), //Ali nodes->getInfoVecs().nodeActinLevel.begin() //Ali )), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(),//AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeActinLevel.begin() //Ali )) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple( 
hostTmpVectorF_MI_M_T.begin(), hostTmpVectorF_MI_M_N.begin(),hostTmpVectorNodeActinLevel.begin() ))); */ thrust::copy(nodes->getInfoVecs().nodeActinLevel.begin(),nodes->getInfoVecs().nodeActinLevel.begin()+ maxActiveNode,hostTmpVectorNodeActinLevel.begin()); //Ali thrust::copy(nodes->getInfoVecs().basalContractPair.begin() ,nodes->getInfoVecs().basalContractPair.begin() + maxActiveNode,hostTmpContractPair.begin()); //Ali // thrust::copy(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelX.begin() + maxActiveNode, hostTmpNodeVelX.begin()); // thrust::copy(nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeVelY.begin() + maxActiveNode, hostTmpNodeVelY.begin()); thrust::copy(nodes->getInfoVecs().contractActomyo_multip_apical.begin(), nodes->getInfoVecs().contractActomyo_multip_apical.begin() + maxActiveNode, hostTmpNodeContrApi.begin()); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; thrust::host_vector<uint> curActiveIntnlNodeCounts = cellInfoVecs.activeIntnlNodeCounts; CVector tmpPos; CVector tmpF_MI_M ;//AAmiri CVector tmpInterCellForce;//AAMIRI double tmpCurv; double tmpMembTen ; double tmpActinLevel ; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double node1F_MI_M_x, node1F_MI_M_y;//AAMIRI //AliE double nodeInterCellForceT, nodeInterCellForceN;//AAMIRI double aniVal; double tmpNodeVel_Mag; double tmpNodeContrApi; //double tmpF_MI_M_MagN_Int[activeCellCount-1] ; //AliE //This is how the VTK file is intended to be written. First the memmbraen nodes are going to be written and then internal nodes. //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { //tmpF_MI_M_MagN_Int[i]=0.0 ; for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true) { tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI tmpMembTen = hostTmpVectorTenMag[index1];//Ali rawAniData.aniNodeMembTension.push_back(tmpMembTen);//Ali tmpActinLevel = hostTmpVectorNodeActinLevel[index1];//Ali rawAniData.aniNodeActinLevel.push_back(tmpActinLevel);//Ali node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); //AliE // tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+sqrt(pow(hostTmpVectorF_MI_M_x[index1],2)+pow(hostTmpVectorF_MI_M_y[index1],2)) ; //AliE //tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+hostTmpVectorF_MI_M_N[index1] ; //AliE nodeInterCellForceT = hostTmpVectorInterCellForceTangent[index1];//AAMIRI nodeInterCellForceN = hostTmpVectorInterCellForceNormal[index1];//AAMIRI tmpInterCellForce = CVector(nodeInterCellForceT, nodeInterCellForceN, 0.0);//AAMIRI rawAniData.aniNodeInterCellForceArr.push_back(tmpInterCellForce); // tmpNodeVel_Mag = sqrt(hostTmpNodeVelX[index1]*hostTmpNodeVelX[index1] + hostTmpNodeVelY[index1]*hostTmpNodeVelY[index1]); // rawAniData.aniNodeVel_Mag.push_back(tmpNodeVel_Mag); tmpNodeContrApi = hostTmpNodeContrApi[index1]; rawAniData.aniNodeContrApi.push_back(tmpNodeContrApi); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } //loop on internal nodes for (uint i=0; i<activeCellCount; i++){ for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true ) { tmpCurv = 
hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI tmpMembTen = hostTmpVectorTenMag[index1];//Ali rawAniData.aniNodeMembTension.push_back(tmpMembTen);//Ali tmpActinLevel = hostTmpVectorNodeActinLevel[index1];//Ali rawAniData.aniNodeActinLevel.push_back(tmpActinLevel);//Ali node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); nodeInterCellForceT = hostTmpVectorInterCellForceTangent[index1];//AAMIRI nodeInterCellForceN = hostTmpVectorInterCellForceNormal[index1];//AAMIRI tmpInterCellForce = CVector(nodeInterCellForceT, nodeInterCellForceN, 0.0);//AAMIRI // tmpNodeVel_Mag = sqrt(hostTmpNodeVelX[index1]*hostTmpNodeVelX[index1] + hostTmpNodeVelY[index1]*hostTmpNodeVelY[index1]); // rawAniData.aniNodeVel_Mag.push_back(tmpNodeVel_Mag); tmpNodeContrApi = hostTmpNodeContrApi[index1]; rawAniData.aniNodeContrApi.push_back(tmpNodeContrApi); rawAniData.aniNodeInterCellForceArr.push_back(tmpInterCellForce); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } // for adhesion pair for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; //rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali added rawAniData.aniNodeF_MI_M_MagN_Int.push_back(cellInfoVecs.cellPressure[i]) ; //Ali added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = cellColors[i]; //rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added rawAniData.aniNodeF_MI_M_MagN_Int.push_back(cellInfoVecs.cellPressure[i]) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; 
LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; // rawAniData.memLinks.push_back(linkData); I don't want this type of membrane nodes links be shown. } } } // loop for links for basal contraction. Since the links are between membrane nodes, no new map needs to be created. for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; index2 = hostTmpContractPair[index1]; if (index2 == -1) { continue; } IndexMap::iterator it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } //loop on internal nodes for (uint i = 0; i < activeCellCount; i++) { // for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = 0; k < allocPara_m.maxAllNodePerCell; k++) { //Ali //for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { //Ali comment index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + k; //Ali // index2 = i * maxNodePerCell + maxMemNodePerCell + k; //Ali comment // if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { if (hostIsActiveVec[index1] && hostIsActiveVec[index2]&& index1 !=index2 ) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; //rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added rawAniData.aniNodeF_MI_M_MagN_Int.push_back(cellInfoVecs.cellPressure[i]) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; //rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added rawAniData.aniNodeF_MI_M_MagN_Int.push_back(cellInfoVecs.cellPressure[i]) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; // rawAniData.internalLinks.push_back(linkData); I don't want internal node links be shown. 
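					// [Editor's note] The map lookups above use locIndexToAniIndexMap so that each
					// node is assigned exactly one animation index the first time it is seen, and
					// links then reference those indices instead of duplicating node positions.
					// A minimal sketch of the same insert-or-reuse idiom (standalone, hypothetical
					// helper; not part of this class):
					//
					//   // Returns the animation index of rawIdx, registering it on first use.
					//   uint aniIndexOf(IndexMap& idxMap, uint& nextAniIdx, uint rawIdx) {
					//       IndexMap::iterator it = idxMap.find(rawIdx);
					//       if (it == idxMap.end()) {
					//           idxMap.insert(std::pair<uint, uint>(rawIdx, nextAniIdx));
					//           return nextAniIdx++;   // first visit: hand out a new index
					//       }
					//       return it->second;         // already registered: reuse its index
					//   }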
				}
			}
		}
	}
	return rawAniData;
}

vector<AniResumeData> SceCells::obtainResumeData() { //AliE
	// Copy node properties from GPU to CPU
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
	uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
	uint maxActiveNode = activeCellCount * maxNodePerCell;

	thrust::host_vector<double> hostTmpNodeLocX(maxActiveNode);
	thrust::host_vector<double> hostTmpNodeLocY(maxActiveNode);
	thrust::host_vector<double> hostTmpDppLevel(maxActiveNode);
	thrust::host_vector<bool> hostTmpNodeIsActive(maxActiveNode);
	thrust::host_vector<MembraneType1> hostTmpMemNodeType(maxActiveNode);

	thrust::copy(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().dppLevel.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().memNodeType1.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().dppLevel.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().memNodeType1.begin()))
					+ maxActiveNode,
			thrust::make_zip_iterator(
					thrust::make_tuple(hostTmpDppLevel.begin(),
							hostTmpNodeIsActive.begin(),
							hostTmpNodeLocX.begin(),
							hostTmpNodeLocY.begin(),
							hostTmpMemNodeType.begin())));

	// Copy cell properties from GPU to CPU. Since the cell-level vectors are small,
	// copying them with a thrust call seems unnecessary.
	thrust::host_vector<uint> hostTmpActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts;
	thrust::host_vector<ECellType> hostTmpCellType = cellInfoVecs.eCellTypeV2;
	thrust::host_vector<double> hostTmpCellCntrX = cellInfoVecs.centerCoordX;
	thrust::host_vector<double> hostTmpCellCntrY = cellInfoVecs.centerCoordY;

	// Arrange the data in a plain CPU vector form that can easily be written to an output file.
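	// [Editor's note] The zip-iterator copy above pulls several device vectors to the host
	// in a single pass rather than one transfer per property. A minimal standalone sketch of
	// the same pattern (hypothetical vectors dX, dY, hX, hY of length n; not part of this class):
	//
	//   thrust::device_vector<double> dX(n), dY(n);
	//   thrust::host_vector<double>   hX(n), hY(n);
	//   thrust::copy(
	//       thrust::make_zip_iterator(thrust::make_tuple(dX.begin(), dY.begin())),
	//       thrust::make_zip_iterator(thrust::make_tuple(dX.end(),   dY.end())),
	//       thrust::make_zip_iterator(thrust::make_tuple(hX.begin(), hY.begin())));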
vector <AniResumeData> aniResumeDatas ; AniResumeData membraneResumeData; AniResumeData internalResumeData; AniResumeData cellResumeData; CVector tmpPos; uint index1; //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < hostTmpActiveMemNodeCounts[i]; j++) { index1 = i * maxNodePerCell + j; if ( hostTmpNodeIsActive[index1]==true) { membraneResumeData.cellRank.push_back(i); // it is cell rank membraneResumeData.nodeType.push_back(hostTmpMemNodeType[index1]); membraneResumeData.signalLevel.push_back(hostTmpDppLevel[index1]); tmpPos=CVector(hostTmpNodeLocX[index1],hostTmpNodeLocY[index1],0) ; membraneResumeData.nodePosArr.push_back(tmpPos) ; } } } aniResumeDatas.push_back(membraneResumeData) ; //loop on internal nodes for (uint i=0; i<activeCellCount; i++){ for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) { index1 = i * maxNodePerCell + j; if ( hostTmpNodeIsActive[index1]==true ) { internalResumeData.cellRank.push_back(i); // it is cell rank tmpPos=CVector(hostTmpNodeLocX[index1],hostTmpNodeLocY[index1],0) ; internalResumeData.nodePosArr.push_back(tmpPos) ; } } } aniResumeDatas.push_back(internalResumeData) ; // loop for cells for (uint i=0; i<activeCellCount; i++){ cellResumeData.cellRank.push_back(i); cellResumeData.cellType.push_back(hostTmpCellType[i]); tmpPos=CVector(hostTmpCellCntrX[i],hostTmpCellCntrY[i],0) ; cellResumeData.nodePosArr.push_back(tmpPos) ; } aniResumeDatas.push_back(cellResumeData) ; return aniResumeDatas; } void SceCells::copyInitActiveNodeCount_M( std::vector<uint>& initMembrActiveNodeCounts, std::vector<uint>& initIntnlActiveNodeCounts, std::vector<double> &initGrowProgVec, std::vector<ECellType> &eCellTypeV1) { assert( initMembrActiveNodeCounts.size() == initIntnlActiveNodeCounts.size()); totalNodeCountForActiveCells = initMembrActiveNodeCounts.size() * allocPara_m.maxAllNodePerCell; thrust::copy(initMembrActiveNodeCounts.begin(), initMembrActiveNodeCounts.end(), cellInfoVecs.activeMembrNodeCounts.begin()); thrust::copy(initIntnlActiveNodeCounts.begin(), initIntnlActiveNodeCounts.end(), cellInfoVecs.activeIntnlNodeCounts.begin()); thrust::copy(initGrowProgVec.begin(), initGrowProgVec.end(), cellInfoVecs.growthProgress.begin()); thrust::copy(eCellTypeV1.begin(), eCellTypeV1.end(), cellInfoVecs.eCellTypeV2.begin()); // v2 might be bigger //for (int i=0 ; i<eCellTypeV1.size() ; i++ ) { // cout << "fourth check for cell type" << cellInfoVecs.eCellTypeV2[i] << endl ; // } } void SceCells::myDebugFunction() { uint maxActiveNodeCount = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxActiveCellCount = allocPara_m.currentActiveCellCount; std::cout << "totalNodeCountforActiveCells: " << totalNodeCountForActiveCells << std::endl; std::cout << "maxAllNodePerCell: " << allocPara_m.maxAllNodePerCell << std::endl; std::cout << "maxActiveCellCount: " << maxActiveCellCount << std::endl; std::cout << "bdryNodeCount: " << allocPara_m.bdryNodeCount << std::endl; std::cout << "grow threshold: " << miscPara.growThreshold << std::endl; std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthProgress[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.isScheduledToGrow[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lastCheckPoint[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeCount; i++) { if 
(nodes->getInfoVecs().nodeIsActive[i] && nodes->getInfoVecs().nodeCellType[i] == CellIntnl) { std::cout << nodes->getInfoVecs().nodeVelX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.activeIntnlNodeCounts[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.expectedLength[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.smallestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.biggestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lengthDifference[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthXDir[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthYDir[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } void SceCells::divDebug() { std::cout << "tmpIsActive_M: "; for (uint i = 0; i < divAuxData.tmpIsActive_M.size(); i++) { std::cout << divAuxData.tmpIsActive_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosX_M: "; for (uint i = 0; i < divAuxData.tmpNodePosX_M.size(); i++) { std::cout << divAuxData.tmpNodePosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosY_M : "; for (uint i = 0; i < divAuxData.tmpNodePosY_M.size(); i++) { std::cout << divAuxData.tmpNodePosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCellRank_M : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { std::cout << divAuxData.tmpCellRank_M[i] << " "; } std::cout << std::endl; std::cout << "tmpDivDirX_M : "; for (uint i = 0; i < divAuxData.tmpDivDirX_M.size(); i++) { std::cout << divAuxData.tmpDivDirX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpDivDirY_M : "; for (uint i = 0; i < divAuxData.tmpDivDirY_M.size(); i++) { std::cout << divAuxData.tmpDivDirY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosX_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosX_M.size(); i++) { std::cout << divAuxData.tmpCenterPosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosY_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosY_M.size(); i++) { std::cout << divAuxData.tmpCenterPosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive1_M : "; for (uint i = 0; i < divAuxData.tmpIsActive1_M.size(); i++) { std::cout << divAuxData.tmpIsActive1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos1_M : "; for (uint i = 0; i < divAuxData.tmpXPos1_M.size(); i++) { std::cout << divAuxData.tmpXPos1_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive1_M[i] && divAuxData.tmpIsActive1_M[i - 1] && fabs(divAuxData.tmpXPos1_M[i] - divAuxData.tmpXPos1_M[i - 1]) > 0.1) { std::cout << "11111111111111111111111, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "XPos1_onDevice : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { uint index = divAuxData.tmpCellRank_M[i] * allocPara_m.maxAllNodePerCell + 
j; std::cout << nodes->getInfoVecs().nodeLocX[index] << " "; } } std::cout << std::endl; std::cout << "tmpYPos1_M : "; for (uint i = 0; i < divAuxData.tmpYPos1_M.size(); i++) { std::cout << divAuxData.tmpYPos1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive2_M: "; for (uint i = 0; i < divAuxData.tmpIsActive2_M.size(); i++) { std::cout << divAuxData.tmpIsActive2_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos2_M : "; for (uint i = 0; i < divAuxData.tmpXPos2_M.size(); i++) { std::cout << divAuxData.tmpXPos2_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive2_M[i] && divAuxData.tmpIsActive2_M[i - 1] && fabs(divAuxData.tmpXPos2_M[i] - divAuxData.tmpXPos2_M[i - 1]) > 0.1) { std::cout << "2222222222222222222, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "tmpYPos2_M : "; for (uint i = 0; i < divAuxData.tmpYPos2_M.size(); i++) { std::cout << divAuxData.tmpYPos2_M[i] << " "; } std::cout << std::endl; std::cout << "tmp1InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp1InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp2InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp1MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp1MemActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp2MemActiveCounts[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } void SceCells::adjustGrowthInfo_M() { uint halfMax = allocPara_m.maxIntnlNodePerCell / 2; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), AdjustGrowth(halfMax), thrust::identity<bool>()); } VtkAnimationData SceCells::outputVtkData(AniRawData& rawAniData, AnimationCriteria& aniCri) { VtkAnimationData vtkData; for (uint i = 0; i < rawAniData.aniNodePosArr.size(); i++) { PointAniData ptAniData; ptAniData.pos = rawAniData.aniNodePosArr[i]; ptAniData.F_MI_M_MagN_Int= rawAniData.aniNodeF_MI_M_MagN_Int[i]; //AliE ptAniData.F_MI_M = rawAniData.aniNodeF_MI_M[i];//AAMIRI ptAniData.colorScale = rawAniData.aniNodeVal[i]; ptAniData.colorScale2 = rawAniData.aniNodeCurvature[i];//AAMIRI ptAniData.colorScale3 = rawAniData.aniNodeMembTension[i];//Ali //ptAniData.colorScale4 = rawAniData.aniNodeVel_Mag[i];//rawAniData.aniNodeActinLevel[i];//Ali ptAniData.colorScale4 = rawAniData.aniNodeContrApi[i]; ptAniData.rankScale = rawAniData.aniNodeRank[i];//AAMIRI ptAniData.intercellForce = rawAniData.aniNodeInterCellForceArr[i];//AAMIRI vtkData.pointsAniData.push_back(ptAniData); } for (uint i = 0; i < rawAniData.internalLinks.size(); i++) { LinkAniData linkData = rawAniData.internalLinks[i]; 
vtkData.linksAniData.push_back(linkData); } for (uint i = 0; i < rawAniData.memLinks.size(); i++) { LinkAniData linkData = rawAniData.memLinks[i]; vtkData.linksAniData.push_back(linkData); } vtkData.isArrowIncluded = false; return vtkData; } void SceCells::copyToGPUConstMem() { double pI_CPU = acos(-1.0); double minLengthCPU = globalConfigVars.getConfigValue("MinLength").toDouble(); cudaMemcpyToSymbol(minLength, &minLengthCPU, sizeof(double)); double minDivisorCPU = globalConfigVars.getConfigValue("MinDivisor").toDouble(); cudaMemcpyToSymbol(minDivisor, &minDivisorCPU, sizeof(double)); cudaMemcpyToSymbol(membrEquLen, &membrPara.membrEquLenCPU, sizeof(double)); cudaMemcpyToSymbol(membrStiff, &membrPara.membrStiffCPU, sizeof(double)); cudaMemcpyToSymbol(membrStiff_Mitotic, &membrPara.membrStiff_Mitotic, sizeof(double)); // Ali June 30 cudaMemcpyToSymbol(kContractMemb, &membrPara.kContractMemb, sizeof(double)); cudaMemcpyToSymbol(pI, &pI_CPU, sizeof(double)); cudaMemcpyToSymbol(bendCoeff, &membrPara.membrBendCoeff, sizeof(double)); cudaMemcpyToSymbol(bendCoeff_Mitotic, &membrPara.membrBendCoeff_Mitotic, sizeof(double));//AAMIRI cudaMemcpyToSymbol(F_Ext_Incline_M2, &membrPara.F_Ext_Incline, sizeof(double)); //Ali uint maxAllNodePerCellCPU = globalConfigVars.getConfigValue( "MaxAllNodeCountPerCell").toInt(); uint maxMembrNodePerCellCPU = globalConfigVars.getConfigValue( "MaxMembrNodeCountPerCell").toInt(); uint maxIntnlNodePerCellCPU = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); cudaMemcpyToSymbol(maxAllNodePerCell, &maxAllNodePerCellCPU, sizeof(uint)); cudaMemcpyToSymbol(maxMembrPerCell, &maxMembrNodePerCellCPU, sizeof(uint)); cudaMemcpyToSymbol(maxIntnlPerCell, &maxIntnlNodePerCellCPU, sizeof(uint)); double sceIntnlBParaCPU_M[5]; double sceIntraParaCPU_M[5]; double sceIntraParaDivCPU_M[5]; double U0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble(); double V0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble(); double k1_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble(); double k2_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble(); double intnlBEffectiveRange = globalConfigVars.getConfigValue( "IntnlBEffectRange").toDouble(); sceIntnlBParaCPU_M[0] = U0_IntnlB; sceIntnlBParaCPU_M[1] = V0_IntnlB; sceIntnlBParaCPU_M[2] = k1_IntnlB; sceIntnlBParaCPU_M[3] = k2_IntnlB; sceIntnlBParaCPU_M[4] = intnlBEffectiveRange; ////////////////////// //// Block 3 ///////// ////////////////////// double U0_Intra = globalConfigVars.getConfigValue("IntraCell_U0").toDouble(); double V0_Intra = globalConfigVars.getConfigValue("IntraCell_V0").toDouble(); double k1_Intra = globalConfigVars.getConfigValue("IntraCell_k1").toDouble(); double k2_Intra = globalConfigVars.getConfigValue("IntraCell_k2").toDouble(); double intraLinkEffectiveRange = globalConfigVars.getConfigValue( "IntraEffectRange").toDouble(); sceIntraParaCPU_M[0] = U0_Intra; sceIntraParaCPU_M[1] = V0_Intra; sceIntraParaCPU_M[2] = k1_Intra; sceIntraParaCPU_M[3] = k2_Intra; sceIntraParaCPU_M[4] = intraLinkEffectiveRange; ////////////////////// //// Block 4 ///////// ////////////////////// double U0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble(); double V0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble(); double k1_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble(); double k2_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble(); double intraDivEffectiveRange = 
			globalConfigVars.getConfigValue("IntraDivEffectRange").toDouble();
	sceIntraParaDivCPU_M[0] = U0_Intra_Div;
	sceIntraParaDivCPU_M[1] = V0_Intra_Div;
	sceIntraParaDivCPU_M[2] = k1_Intra_Div;
	sceIntraParaDivCPU_M[3] = k2_Intra_Div;
	sceIntraParaDivCPU_M[4] = intraDivEffectiveRange;

	cudaMemcpyToSymbol(grthPrgrCriEnd_M, &growthAuxData.grthProgrEndCPU,
			sizeof(double));
	//cudaMemcpyToSymbol(grthPrgrCriVal_M, &growthPrgrCriVal, sizeof(double));
	cudaMemcpyToSymbol(sceIB_M, sceIntnlBParaCPU_M, 5 * sizeof(double));
	cudaMemcpyToSymbol(sceII_M, sceIntraParaCPU_M, 5 * sizeof(double));
	cudaMemcpyToSymbol(sceIIDiv_M, sceIntraParaDivCPU_M, 5 * sizeof(double));

	double IBDivHost[5];
	IBDivHost[0] = globalConfigVars.getConfigValue("SceIntnlB_U0_Div").toDouble();
	IBDivHost[1] = globalConfigVars.getConfigValue("SceIntnlB_V0_Div").toDouble();
	IBDivHost[2] = globalConfigVars.getConfigValue("SceIntnlB_k1_Div").toDouble();
	IBDivHost[3] = globalConfigVars.getConfigValue("SceIntnlB_k2_Div").toDouble();
	IBDivHost[4] = globalConfigVars.getConfigValue("IntnlBDivEffectRange").toDouble();
	cudaMemcpyToSymbol(sceIBDiv_M, IBDivHost, 5 * sizeof(double));

	//////////////////////
	//// Block Nucleus /////////
	//////////////////////
	double sceNucleusParaCPU_M[5];
	double U0_Nucleus = globalConfigVars.getConfigValue("NucleusCell_U0").toDouble();
	double V0_Nucleus = globalConfigVars.getConfigValue("NucleusCell_V0").toDouble();
	double k1_Nucleus = globalConfigVars.getConfigValue("NucleusCell_k1").toDouble();
	double k2_Nucleus = globalConfigVars.getConfigValue("NucleusCell_k2").toDouble();
	double nucleusLinkEffectiveRange = globalConfigVars.getConfigValue(
			"NucleusEffectRange").toDouble();
	sceNucleusParaCPU_M[0] = U0_Nucleus;
	sceNucleusParaCPU_M[1] = V0_Nucleus;
	sceNucleusParaCPU_M[2] = k1_Nucleus;
	sceNucleusParaCPU_M[3] = k2_Nucleus;
	sceNucleusParaCPU_M[4] = nucleusLinkEffectiveRange;

	//////////////////////
	//// Block Nucleus Division /////////
	//////////////////////
	double sceNucleusParaDivCPU_M[5];
	double U0_Nucleus_Div = globalConfigVars.getConfigValue("NucleusCell_U0_Div").toDouble();
	double V0_Nucleus_Div = globalConfigVars.getConfigValue("NucleusCell_V0_Div").toDouble();
	double k1_Nucleus_Div = globalConfigVars.getConfigValue("NucleusCell_k1_Div").toDouble();
	double k2_Nucleus_Div = globalConfigVars.getConfigValue("NucleusCell_k2_Div").toDouble();
	double nucleusDivEffectiveRange = globalConfigVars.getConfigValue(
			"NucleusDivEffectRange").toDouble();
	sceNucleusParaDivCPU_M[0] = U0_Nucleus_Div;
	sceNucleusParaDivCPU_M[1] = V0_Nucleus_Div;
	sceNucleusParaDivCPU_M[2] = k1_Nucleus_Div;
	sceNucleusParaDivCPU_M[3] = k2_Nucleus_Div;
	sceNucleusParaDivCPU_M[4] = nucleusDivEffectiveRange;

	cudaMemcpyToSymbol(sceN_M, sceNucleusParaCPU_M, 5 * sizeof(double)); //Ali
	cudaMemcpyToSymbol(sceNDiv_M, sceNucleusParaDivCPU_M, 5 * sizeof(double)); //Ali
}

void SceCells::updateMembrGrowthProgress_M() {
	// figure out membr growth speed
	calMembrGrowSpeed_M(); //Ali: to my understanding it doesn't do anything right now; it will be overridden by adjustMembrGrowSpeed_M
	// figure out which cells will add a new point and which cells need to delete a node.
	// adjustMembrGrowSpeed_M(); // for now just a constant speed to give some relaxation before adding another node.
	// Returns a bool and a progress value for each cell. If the bool is true (a node should be added),
	// progress is reset to give relaxation time after adding the node; otherwise growth progress is incremented.
	// add membr nodes
	// In each time step either the node-adding mechanism or the node-deleting mechanism is active.
It is an unneccessary complication to manage memory for both operations at one time step. // uint curActCellCt = allocPara_m.currentActiveCellCount; // thrust::transform(cellInfoVecs.membrGrowSpeed.begin(), // cellInfoVecs.membrGrowSpeed.begin() + curActCellCt, // cellInfoVecs.membrGrowProgress.begin(), // cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt)); } void SceCells::handleMembrGrowth_M(int maxApicalBasalNodeNum, double maxLengthToAddMemNodes) { if (1>0){//addNode) { decideIfAddMembrNode_M(maxApicalBasalNodeNum, maxLengthToAddMemNodes); addMembrNodes_M(); // addNode=false ; // cout << " I am in add membrane node " << endl ; } else { // decideIfDelMembrNode_M(); //Ali // delMembrNodes_M(); // addNode=true ; // cout << " I am in del membrane node " << endl ; } //membrDebug(); } // void SceCells::calMembrGrowSpeed_M() { // membrPara.membrGrowCoeff = growthAuxData.prolifDecay // * membrPara.membrGrowCoeff_Ori; // membrPara.membrGrowLimit = growthAuxData.prolifDecay // * membrPara.membrGrowLimit_Ori; // // reduce_by_key, find value of max tension and their index // thrust::counting_iterator<uint> iBegin(0); // uint maxNPerCell = allocPara_m.maxAllNodePerCell; // thrust::reduce_by_key( // make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), // make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().membrTenMagRi.begin(), // make_transform_iterator(iBegin, // ModuloFunctor(maxNPerCell)), // nodes->getInfoVecs().membrLinkRiMidX.begin(), // nodes->getInfoVecs().membrLinkRiMidY.begin(), // nodes->getInfoVecs().membrDistToRi.begin())), // cellInfoVecs.cellRanksTmpStorage.begin(), // thrust::make_zip_iterator( // thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(), // cellInfoVecs.maxTenIndxVec.begin(), // cellInfoVecs.maxTenRiMidXVec.begin(), // cellInfoVecs.maxTenRiMidYVec.begin(), // cellInfoVecs.maxDistToRiVec.begin())), // thrust::equal_to<uint>(), MaxWInfo()); // // for (int i=0 ; i<cellInfoVecs.maxDistToRiVec.size() ; i++) { // // cout << "the max distance in cell" << i << " is "<<cellInfoVecs.maxDistToRiVec[i] << endl ; // // } // //Ali for min Distance // thrust::counting_iterator<uint> iBegin_min(0); // thrust::reduce_by_key( // make_transform_iterator(iBegin_min, DivideFunctor(maxNPerCell)), // begin of the key // make_transform_iterator(iBegin_min, DivideFunctor(maxNPerCell)) // end of the key // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().membrDistToRi.begin(), // make_transform_iterator(iBegin_min, // values to reduce by key // ModuloFunctor(maxNPerCell)) // )), // cellInfoVecs.cellRanksTmpStorage1.begin(), // to Store reduced version of key // thrust::make_zip_iterator( // thrust::make_tuple( // cellInfoVecs.minDistToRiVec.begin(), // cellInfoVecs.minTenIndxVec.begin() // to sotred the reduce verision of values // )), // thrust::equal_to<uint>(), MinWInfo()); // how to sort the keys & how to reduce the parameters assigned to based on each key // // equal_to mean how we set the beans to reduce. For example here we are saying if they are equal in Int we compare them and would peroform the reduction. 
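// 	[Editor's note] reduce_by_key scans the key sequence and, whenever consecutive keys
// 	compare equal (thrust::equal_to above), folds their values with the supplied binary
// 	functor, emitting one key and one reduced value per run. A minimal sketch of the same
// 	idea on hypothetical data (per-cell maximum of a per-node value):
//
// 	  int keysRaw[6] = {0, 0, 0, 1, 1, 2};   // cell rank of each node
// 	  int valsRaw[6] = {5, 2, 7, 1, 9, 4};   // some per-node quantity
// 	  thrust::device_vector<int> keys(keysRaw, keysRaw + 6), vals(valsRaw, valsRaw + 6);
// 	  thrust::device_vector<int> outKeys(3), outVals(3);
// 	  thrust::reduce_by_key(keys.begin(), keys.end(), vals.begin(),
// 	                        outKeys.begin(), outVals.begin(),
// 	                        thrust::equal_to<int>(), thrust::maximum<int>());
// 	  // outKeys = {0, 1, 2}, outVals = {7, 9, 4}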
// // for (int i=0 ; i<cellInfoVecs.minDistToRiVec.size() ; i++) { // // cout << "the min distance in cell" << i << " is "<<cellInfoVecs.minDistToRiVec[i] << endl ; // // cout << "the min tension index vec" << i << " is "<<cellInfoVecs.minTenIndxVec[i] << endl ; // // } // thrust::reduce_by_key( // make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), // make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) // + totalNodeCountForActiveCells, // nodes->getInfoVecs().membrTensionMag.begin(), // cellInfoVecs.cellRanksTmpStorage.begin(), // cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(), // thrust::plus<double>()); // thrust::transform(cellInfoVecs.aveTension.begin(), // cellInfoVecs.aveTension.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.aveTension.begin(), thrust::divides<double>()); // // linear relationship with highest tension; capped by a given value // thrust::transform(cellInfoVecs.aveTension.begin(), // cellInfoVecs.aveTension.begin() // + allocPara_m.currentActiveCellCount, // cellInfoVecs.membrGrowSpeed.begin(), // MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit)); // } void SceCells::calMembrGrowSpeed_M() { membrPara.membrGrowCoeff = growthAuxData.prolifDecay * membrPara.membrGrowCoeff_Ori; membrPara.membrGrowLimit = growthAuxData.prolifDecay * membrPara.membrGrowLimit_Ori; // reduce_by_key, find value of max tension and their index thrust::counting_iterator<uint> iBegin(0); uint maxNPerCell = allocPara_m.maxAllNodePerCell; thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrTenMagRi.begin(), make_transform_iterator(iBegin, ModuloFunctor(maxNPerCell)), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrDistToRi.begin(), nodes->getInfoVecs().memNodeType1.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(), cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.maxDistToRiVec.begin(), cellInfoVecs.maxTenIndxTypeVec.begin())), thrust::equal_to<uint>(), MaxWInfo()); // for (int i=0 ; i<cellInfoVecs.maxDistToRiVec.size() ; i++) { // cout << "the max distance in cell" << i << " is "<<cellInfoVecs.maxDistToRiVec[i] << endl ; // cout << "At index "<<cellInfoVecs.maxTenIndxVec[i]<<std::endl; // } //Ali for min Distance thrust::counting_iterator<uint> iBegin_min(0); thrust::reduce_by_key( make_transform_iterator(iBegin_min, DivideFunctor(maxNPerCell)), // begin of the key make_transform_iterator(iBegin_min, DivideFunctor(maxNPerCell)) // end of the key + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrDistToRi.begin(), make_transform_iterator(iBegin_min, // values to reduce by key ModuloFunctor(maxNPerCell)) )), cellInfoVecs.cellRanksTmpStorage1.begin(), // to Store reduced version of key thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.minDistToRiVec.begin(), cellInfoVecs.minTenIndxVec.begin() // to sotred the reduce verision of values )), thrust::equal_to<uint>(), MinWInfo()); // how to sort the keys & how to reduce the parameters assigned to based on each key // equal_to mean how we set the beans to reduce. 
For example here we are saying if they are equal in Int we compare them and would peroform the reduction. // for (int i=0 ; i<cellInfoVecs.minDistToRiVec.size() ; i++) { // cout << "the min distance in cell" << i << " is "<<cellInfoVecs.minDistToRiVec[i] << endl ; // cout << "the min tension index vec" << i << " is "<<cellInfoVecs.minTenIndxVec[i] << endl ; // } thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, nodes->getInfoVecs().membrTensionMag.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.aveTension.begin(), thrust::divides<double>()); // linear relationship with highest tension; capped by a given value thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit)); } void SceCells::adjustMembrGrowSpeed_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), AdjustMembrGrow(membrPara.growthConst_N, membrPara.initMembrCt_N, membrPara.initIntnlCt_N)); } void SceCells::decideIfAddMembrNode_M(int maxApicalBasalNodeNum, double maxLengthToAddMemNodes) { // decide if add membrane node given current active node count and // membr growth progresss uint curActCellCt = allocPara_m.currentActiveCellCount; uint maxMembrNode = allocPara_m.maxMembrNodePerCell; bool isInitPhase= nodes->isInitPhase ; /* thrust::transform(cellInfoVecs.membrGrowSpeed.begin(), cellInfoVecs.membrGrowSpeed.begin() + curActCellCt, cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt)); */ /* thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemGrowFunc(maxMembrNode)); */ thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.maxDistToRiVec.begin(), cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.numApicalVec.begin(), cellInfoVecs.numBasalVec.begin(), cellInfoVecs.maxTenIndxTypeVec.begin(), cellInfoVecs.cellRankVec.begin() )), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.maxDistToRiVec.begin(), cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.numApicalVec.begin(), cellInfoVecs.numBasalVec.begin(), cellInfoVecs.maxTenIndxTypeVec.begin(), cellInfoVecs.cellRankVec.begin() )) + curActCellCt, thrust::make_zip_iterator( 
thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.numApicalVec.begin(), cellInfoVecs.numBasalVec.begin())), MemGrowFunc(maxMembrNode,isInitPhase, maxApicalBasalNodeNum, maxLengthToAddMemNodes)); } //Ali void SceCells::decideIfDelMembrNode_M() { uint curActCellCt = allocPara_m.currentActiveCellCount; uint maxMembrNode = allocPara_m.maxMembrNodePerCell; bool isInitPhase= nodes->isInitPhase ; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.minDistToRiVec.begin(), cellInfoVecs.growthProgress.begin() )), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.minDistToRiVec.begin(), cellInfoVecs.growthProgress.begin() )) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrRemovingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemDelFunc(maxMembrNode, isInitPhase)); } /** * Add new membrane elements to cells. * This operation is relatively expensive because of memory rearrangement. */ // void SceCells::addMembrNodes_M() { // thrust::counting_iterator<uint> iBegin(0); // uint curAcCCount = allocPara_m.currentActiveCellCount; // uint maxNodePerCell = allocPara_m.maxAllNodePerCell; // bool* nodeIsActiveAddress = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeIsActive[0])); // double* nodeXPosAddress = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocX[0])); // double* nodeYPosAddress = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocY[0])); // int* adhIndxAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeAdhereIndex[0])); // MembraneType1* memNodeType1 = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().memNodeType1[0])); // thrust::transform_if( // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, // cellInfoVecs.maxTenIndxVec.begin(), // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.maxTenRiMidXVec.begin(), // cellInfoVecs.maxTenRiMidYVec.begin(), // cellInfoVecs.ringApicalId.begin(), // cellInfoVecs.ringBasalId.begin())), // thrust::make_zip_iterator( // thrust::make_tuple(iBegin, // cellInfoVecs.maxTenIndxVec.begin(), // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.maxTenRiMidXVec.begin(), // cellInfoVecs.maxTenRiMidYVec.begin(), // cellInfoVecs.ringApicalId.begin(), // cellInfoVecs.ringBasalId.begin())) // + curAcCCount, cellInfoVecs.isMembrAddingNode.begin(), // thrust::make_zip_iterator( // thrust::make_tuple( // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.ringApicalId.begin(), // cellInfoVecs.ringBasalId.begin())), // AddMemNode(maxNodePerCell, // nodeIsActiveAddress, // nodeXPosAddress, // nodeYPosAddress, // adhIndxAddr, // memNodeType1), // thrust::identity<bool>()); // } void SceCells::addMembrNodes_M() { thrust::counting_iterator<uint> iBegin(0); uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; bool* nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double* nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); int* adhIndxAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[0])); MembraneType1* memNodeType1 = thrust::raw_pointer_cast( 
&(nodes->getInfoVecs().memNodeType1[0])); thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())) + curAcCCount, cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), AddMemNode(maxNodePerCell, nodeIsActiveAddress, nodeXPosAddress, nodeYPosAddress, adhIndxAddr, memNodeType1), thrust::identity<bool>()); for (int z = 0; z < allocPara_m.currentActiveCellCount; z++){ if (cellInfoVecs.isMembrAddingNode[z] == true){ nodes->isMemNodeTypeAssigned_postAddNode=false; cellInfoVecs.isPostAddMembrNodes = true; break; } } } //Ali void SceCells::delMembrNodes_M() { thrust::counting_iterator<uint> iBegin(0); uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.minTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())) + curAcCCount, cellInfoVecs.isMembrRemovingNode.begin(), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.ringApicalId.begin(), cellInfoVecs.ringBasalId.begin())), DelMemNode(maxNodePerCell, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.adhIndxAddr,growthAuxData.memNodeType1Address), thrust::identity<bool>()); } void SceCells::membrDebug() { uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxActiveNodeC = curAcCCount * allocPara_m.maxAllNodePerCell; uint maxNodePC = allocPara_m.maxAllNodePerCell; //uint tmp = 0; //for (uint i = 0; i < curAcCCount; i++) { // tmp += cellInfoVecs.isMembrAddingNode[i]; //} //if (tmp != 0) { // tmpDebug = true; //} //if (!tmpDebug) { // return; //} for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTensionMag[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTenMagRi[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidY[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendLeftX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << 
nodes->getInfoVecs().membrBendLeftY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < curAcCCount; i++) { std::cout << "(" << cellInfoVecs.maxTenIndxVec[i] << "," << cellInfoVecs.activeMembrNodeCounts[i] << "," << cellInfoVecs.maxTenRiMidXVec[i] << "," << cellInfoVecs.maxTenRiMidYVec[i] << ")" << std::endl; } int jj; std::cin >> jj; } void SceCells::assembleVecForTwoCells(uint i) { uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp1VecMem.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1VecMem[j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1VecMem[j].y; divAuxData.tmpNodeType1[index] = divAuxData.tmp1VecMemNodeType[j] ; //Ali // std::cout<<"divAuxData.tmpPos1_M["<<index<<"] = "<<divAuxData.tmpXPos1_M[index]<<" "<<divAuxData.tmpYPos1_M[index]<<", tmpNodeType1 = "<<divAuxData.tmpNodeType1[index]<<std::endl; divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } } for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp2VecMem.size()) { divAuxData.tmpXPos2_M[index] = divAuxData.tmp2VecMem[j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2VecMem[j].y; divAuxData.tmpNodeType2[index] = divAuxData.tmp2VecMemNodeType[j] ; //Ali // std::cout<<"divAuxData.tmpPos2_M["<<index<<"] = "<<divAuxData.tmpXPos2_M[index]<<" "<<divAuxData.tmpYPos2_M[index]<<", tmpNodeType2 = "<<divAuxData.tmpNodeType2[index]<<std::endl; divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1MemActiveCounts.push_back(divAuxData.tmp1VecMem.size()); std::cout<<"divAuxData.tmp1MemActiveCounts size = "<<divAuxData.tmp1MemActiveCounts[0]<<std::endl; divAuxData.tmp2MemActiveCounts.push_back(divAuxData.tmp2VecMem.size()); std::cout<<"divAuxData.tmp2MemActiveCounts size = "<<divAuxData.tmp2MemActiveCounts[0]<<std::endl; for (uint j = membThreshold; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; uint shift_j = j - membThreshold; if (shift_j < divAuxData.tmp1IntnlVec.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].y; divAuxData.tmpNodeType1[index] = notAssigned1 ; //Ali divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } if (shift_j < divAuxData.tmp2IntnlVec.size()) { divAuxData.tmpXPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].y; divAuxData.tmpNodeType2[index] = notAssigned1 ; //Ali divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1InternalActiveCounts.push_back( divAuxData.tmp1IntnlVec.size()); divAuxData.tmp2InternalActiveCounts.push_back( divAuxData.tmp2IntnlVec.size()); } // we have two new center of internal node positions. 
// we already shrinked the internal nodes around their old internal nodes center // here we shift the internal nodes of each cell around the new internal node position void SceCells::shiftIntnlNodesByCellCenter(CVector intCell1Center, CVector intCell2Center) { CVector tmpCell1Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { tmpCell1Center = tmpCell1Center + divAuxData.tmp1IntnlVec[j]; } tmpCell1Center = tmpCell1Center / divAuxData.tmp1IntnlVec.size(); CVector shiftVec1 = intCell1Center - tmpCell1Center; // it should be new nucleus center for cell1 for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { divAuxData.tmp1IntnlVec[j] = divAuxData.tmp1IntnlVec[j] + shiftVec1; } CVector tmpCell2Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { tmpCell2Center = tmpCell2Center + divAuxData.tmp2IntnlVec[j]; } tmpCell2Center = tmpCell2Center / divAuxData.tmp2IntnlVec.size(); CVector shiftVec2 = intCell2Center - tmpCell2Center; // it should be new nucleus center for cell 2 for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { divAuxData.tmp2IntnlVec[j] = divAuxData.tmp2IntnlVec[j] + shiftVec2; } } void SceCells::processMemVec(uint i, std::vector<VecValT>& tmp1, std::vector<VecValT>& tmp2, CVector oldNucleusCenter) { divAuxData.tmp1VecMem.clear(); divAuxData.tmp2VecMem.clear(); divAuxData.tmp1VecMemNodeType.clear(); //Ali divAuxData.tmp2VecMemNodeType.clear(); //Ali uint membThreshold = allocPara_m.maxMembrNodePerCell; std::sort(tmp1.begin(), tmp1.end()); std::sort(tmp2.begin(), tmp2.end()); // Here we perform a cross-product computation to check the node order orientation. If it is clockwise, reverse it so it is counter-clockwise. double vec1x = (tmp1[0].vec.x - oldNucleusCenter.x); double vec2x = (tmp1[1].vec.x - oldNucleusCenter.x); double vec1y = (tmp1[0].vec.y - oldNucleusCenter.y); double vec2y = (tmp1[1].vec.y - oldNucleusCenter.y); double orientation = vec1x*vec2y - vec2x*vec1y; if (orientation < 0){ std::cout<<"tmp1 orientation incorrect. Rearranging tmp1."<<std::endl; std::vector<VecValT> tmp_tmp1 = tmp1; tmp1.clear(); for (int i = 0; i < tmp_tmp1.size(); i++){ tmp1.push_back(tmp_tmp1[(tmp_tmp1.size()-1) - i]); } } vec1x = (tmp2[0].vec.x - oldNucleusCenter.x); vec2x = (tmp2[1].vec.x - oldNucleusCenter.x); vec1y = (tmp2[0].vec.y - oldNucleusCenter.y); vec2y = (tmp2[1].vec.y - oldNucleusCenter.y); orientation = vec1x*vec2y - vec2x*vec1y; if (orientation < 0){ std::cout<<"tmp2 orientation incorrect. 
Rearranging tmp2."<<std::endl; std::vector<VecValT> tmp_tmp2 = tmp2; tmp2.clear(); for (int i = 0; i < tmp_tmp2.size(); i++){ tmp2.push_back(tmp_tmp2[(tmp_tmp2.size()-1) - i]); } } // std::vector<VecValT> sorted_tmp1 = tmp1; // std::vector<VecValT> sorted_tmp2 = tmp2; // std::sort(sorted_tmp1.begin(), sorted_tmp1.end()); // std::sort(sorted_tmp2.begin(), sorted_tmp2.end()); // for (int j = 0; j < tmp1.size(); j++){ // std::cout<<"tmp1["<<j<<"].val = "<<tmp1[j].val<<", vec = "<<tmp1[j].vec.x<<" "<<tmp1[j].vec.y<<std::endl; // } // for (int j = 0; j < tmp2.size(); j++){ // std::cout<<"tmp2["<<j<<"].val = "<<tmp2[j].val<<", vec = "<<tmp2[j].vec.x<<" "<<tmp2[j].vec.y<<std::endl; // } //assert(tmp1.size() < allocPara_m.maxMembrNodePerCell); //assert(tmp2.size() < allocPara_m.maxMembrNodePerCell); uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size(); uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size(); std::vector<CVector> ptsBetween1, ptsBetween2; uint Num_of_NodeType0_MotherCell = 0, Num_of_NodeType0_DaughterCell = 0, Num_of_NodeType1_DaughterCell = 0, Num_of_NodeType1_MotherCell = 0; if (divAuxData.isMotherCellBehind[i] == true){ // Recall that tmp1 will always be the mother cell data //Kevin for (int j = 0; j < tmp1.size(); j++){ if (tmp1[j].type == lateralB){ Num_of_NodeType0_MotherCell+=1; } } if (Num_of_NodeType0_MotherCell > maxDivMembrNodeCount1){ std::cout<<"Too many new nodes are needed to be introduced for mother cell! Change the max number of mem nodes allowed!"<<std::endl; } for (int j = 0; j < tmp2.size(); j++){ if (tmp2[j].type == lateralA){ Num_of_NodeType1_DaughterCell+=1; } } if (Num_of_NodeType1_DaughterCell > maxDivMembrNodeCount2){ std::cout<<"Too many new nodes are needed to be introducedj for daughter cell! Change the max number of mem nodes allowed!"<<std::endl; } // Num_of_NodeType0_MotherCell = ; // Num_of_NodeType1_DaughterCell = ; } else{ for (int j = 0; j < tmp1.size(); j++){ if (tmp1[j].type == lateralA){ Num_of_NodeType1_MotherCell+=1; } } if (Num_of_NodeType1_MotherCell > maxDivMembrNodeCount1){ std::cout<<"Too many new nodes are needed to be introduced for mother cell! Change the max number of mem nodes allowed!"<<std::endl; } for (int j = 0; j < tmp2.size(); j++){ if (tmp2[j].type == lateralB){ Num_of_NodeType0_DaughterCell+=1; } } if (Num_of_NodeType0_DaughterCell > maxDivMembrNodeCount2){ std::cout<<"Too many new nodes are needed to be introduced for daughter cell! Change the max number of mem nodes allowed!"<<std::endl; } } std::cout<<"Num_of_NodeType0_MotherCell = "<<Num_of_NodeType0_MotherCell<<std::endl; std::cout<<"Num_of_NodeType1_MotherCell = "<<Num_of_NodeType1_MotherCell<<std::endl; std::cout<<"Num_of_NodeType0_DaughterCell = "<<Num_of_NodeType0_DaughterCell<<std::endl; std::cout<<"Num_of_NodeType1_DaughterCell = "<<Num_of_NodeType1_DaughterCell<<std::endl; // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. 
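	// [Editor's note] The orientation checks near the top of this function rely on the sign
	// of the 2D cross product of two membrane-node offsets from the old nucleus center:
	// positive means the nodes are ordered counter-clockwise, negative means clockwise (in
	// which case the list is reversed). A minimal sketch of that test (hypothetical helper;
	// not part of this class):
	//
	//   // > 0: counter-clockwise, < 0: clockwise, == 0: collinear
	//   double orientation2D(const CVector& center, const CVector& a, const CVector& b) {
	//       double v1x = a.x - center.x, v1y = a.y - center.y;
	//       double v2x = b.x - center.x, v2y = b.y - center.y;
	//       return v1x * v2y - v2x * v1y;
	//   }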
if (tmp1.size() >= 1) { if (divAuxData.isMotherCellBehind[i] == true){ ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, Num_of_NodeType0_MotherCell, maxDivMembrNodeCount1); } else{ ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, Num_of_NodeType1_MotherCell, maxDivMembrNodeCount1); } // std::cout<<"tmp1[tmp1.size() - 1] = "<<tmp1[tmp1.size() - 1].vec.x<<" "<<tmp1[tmp1.size()-1].vec.y<<std::endl; // std::cout<<"tmp1[0] = "<<tmp1[0].vec.x<<" "<<tmp1[0].vec.y<<std::endl; // ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, // memNewSpacing, maxDivMembrNodeCount1); // std::cout<<"sorted_tmp1[tmp1.size() - 1].val = "<<sorted_tmp1[sorted_tmp1.size()-1].val<<", .vec = "<< sorted_tmp1[sorted_tmp1.size() - 1].vec.x<<" "<<sorted_tmp1[sorted_tmp1.size()-1].vec.y<<std::endl; // std::cout<<"sorted_tmp1[0].val = "<<sorted_tmp1[0].val<<", .vec = "<<sorted_tmp1[0].vec.x<<" "<<sorted_tmp1[0].vec.y<<std::endl; // ptsBetween1 = obtainPtsBetween(sorted_tmp1[sorted_tmp1.size() - 1].vec, sorted_tmp1[0].vec, // memNewSpacing, maxDivMembrNodeCount1); } // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. if (tmp2.size() >= 1) { if (divAuxData.isMotherCellBehind[i] == true){ ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, Num_of_NodeType1_DaughterCell, maxDivMembrNodeCount2); } else{ ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, Num_of_NodeType0_DaughterCell, maxDivMembrNodeCount2); } // std::cout<<"tmp2[tmp1.size() - 1] = "<<tmp2[tmp2.size() - 1].vec.x<<" "<<tmp2[tmp2.size()-1].vec.y<<std::endl; // std::cout<<"tmp2[0] = "<<tmp2[0].vec.x<<" "<<tmp2[0].vec.y<<std::endl; // ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, // memNewSpacing, maxDivMembrNodeCount2); // std::cout<<"sorted_tmp2[sorted_tmp2.size() - 1].val = "<<sorted_tmp2[sorted_tmp2.size()-1].val<<", .vec = "<<sorted_tmp2[sorted_tmp2.size() - 1].vec.x<<" "<<sorted_tmp2[sorted_tmp2.size()-1].vec.y<<std::endl; // std::cout<<"sorted_tmp2[0].val = "<<sorted_tmp2[0].val<<", vec = "<<sorted_tmp2[0].vec.x<<" "<<sorted_tmp2[0].vec.y<<std::endl; // ptsBetween2 = obtainPtsBetween(sorted_tmp2[sorted_tmp2.size() - 1].vec, sorted_tmp2[0].vec, // memNewSpacing, maxDivMembrNodeCount2); } for (uint j = 0; j < tmp1.size(); j++) { divAuxData.tmp1VecMem.push_back(tmp1[j].vec); divAuxData.tmp1VecMemNodeType.push_back(tmp1[j].type); } for (uint j = 0; j < tmp2.size(); j++) { divAuxData.tmp2VecMem.push_back(tmp2[j].vec); divAuxData.tmp2VecMemNodeType.push_back(tmp2[j].type); } std::cout<<"ptsBetween1 size = "<<ptsBetween1.size()<<std::endl; if (divAuxData.isMotherCellBehind[i] == false){ for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); // std::cout<<"ptsBtween1 "<<ptsBetween1[j].x<<" "<<ptsBetween1[j].y<<std::endl; divAuxData.tmp1VecMemNodeType.push_back(lateralB); } // std::cout<<"size of tmp1VecMemNodeType = "<<divAuxData.tmp1VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp1VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp1VecMem["<<j<<"] = "<<divAuxData.tmp1VecMem[j].x<<" "<<divAuxData.tmp1VecMem[j].y<<" "<<divAuxData.tmp1VecMem[j].z<<", Type = "<<divAuxData.tmp1VecMemNodeType[j]<<std::endl; // } } else if (divAuxData.isMotherCellBehind[i] == true){ for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); // std::cout<<"ptsBtween1 
"<<ptsBetween1[j].x<<" "<<ptsBetween1[j].y<<std::endl; divAuxData.tmp1VecMemNodeType.push_back(lateralA); } // std::cout<<"size of tmp1VecMemNodeType = "<<divAuxData.tmp1VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp1VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp1VecMem["<<j<<"] = "<<divAuxData.tmp1VecMem[j].x<<" "<<divAuxData.tmp1VecMem[j].y<<" "<<divAuxData.tmp1VecMem[j].z<<", Type = "<<divAuxData.tmp1VecMemNodeType[j]<<std::endl; // } } std::cout<<"ptsBetween2 size = "<<ptsBetween2.size()<<std::endl; if (divAuxData.isMotherCellBehind[i] == false){ for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); // std::cout<<"ptsBtween2 "<<ptsBetween2[j].x<<" "<<ptsBetween2[j].y<<std::endl; divAuxData.tmp2VecMemNodeType.push_back(lateralA); } // std::cout<<"size of tmp2VecMemNodeType = "<<divAuxData.tmp2VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp2VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp2VecMem["<<j<<"] = "<<divAuxData.tmp2VecMem[j].x<<" "<<divAuxData.tmp2VecMem[j].y<<" "<<divAuxData.tmp2VecMem[j].z<<", Type = "<<divAuxData.tmp2VecMemNodeType[j]<<std::endl; // } } else if (divAuxData.isMotherCellBehind[i] == true){ for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); // std::cout<<"ptsBtween2 "<<ptsBetween2[j].x<<" "<<ptsBetween2[j].y<<std::endl; divAuxData.tmp2VecMemNodeType.push_back(lateralB); } // std::cout<<"size of tmp2VecMemNodeType = "<<divAuxData.tmp2VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp2VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp2VecMem["<<j<<"] = "<<divAuxData.tmp2VecMem[j].x<<" "<<divAuxData.tmp2VecMem[j].y<<" "<<divAuxData.tmp2VecMem[j].z<<", Type = "<<divAuxData.tmp2VecMemNodeType[j]<<std::endl; // } } //Here we will try to adjust the positions of newly added nodes (from ptsBetween1 and ptsBetween2) according to the 'center line' of the cell. assert(divAuxData.tmp1VecMem.size() <= membThreshold); assert(divAuxData.tmp2VecMem.size() <= membThreshold); } void SceCells::processMemVec_Ver2(uint i, std::vector<VecValT>& tmp1, std::vector<VecValT>& tmp2, CVector oldNucleusCenter, std::vector<CVector>& cellCenterLine_Basal2Apical, std::vector<CVector>& cellCenterLine_Apical2Basal, std::vector<CVector>& cellCenterLine_Basal2Apical_leftShift, std::vector<CVector>& cellCenterLine_Basal2Apical_rightShift, std::vector<CVector>& cellCenterLine_Apical2Basal_leftShift, std::vector<CVector>& cellCenterLine_Apical2Basal_rightShift, std::vector<double>& cellCenterLine_MirrorLength_Basal2Apical, std::vector<double>& cellCenterLine_MirrorLength_Apical2Basal) { divAuxData.tmp1VecMem.clear(); divAuxData.tmp2VecMem.clear(); divAuxData.tmp1VecMemNodeType.clear(); //Ali divAuxData.tmp2VecMemNodeType.clear(); //Ali uint membThreshold = allocPara_m.maxMembrNodePerCell; // std::sort(tmp1.begin(), tmp1.end()); // std::sort(tmp2.begin(), tmp2.end()); // Here we perform a cross-product computation to check the node order orientation. If it is clockwise, reverse it so it is counter-clockwise. double vec1x = (tmp1[0].vec.x - oldNucleusCenter.x); double vec2x = (tmp1[1].vec.x - oldNucleusCenter.x); double vec1y = (tmp1[0].vec.y - oldNucleusCenter.y); double vec2y = (tmp1[1].vec.y - oldNucleusCenter.y); double orientation = vec1x*vec2y - vec2x*vec1y; std::cout<<"tmp1 orientation = "<<orientation<<std::endl; if (orientation < 0){ std::cout<<"tmp1 orientation incorrect. 
Rearranging tmp1."<<std::endl; std::vector<VecValT> tmp_tmp1 = tmp1; for (int i = 0; i < tmp1.size(); i++){ // std::cout<<"tmp1 "<<tmp1[i].vec.x<<" "<<tmp1[i].vec.y<<" "<<tmp1[i].val<<" "<<tmp1[i].type<<std::endl; } tmp1.clear(); for (int i = 0; i < tmp_tmp1.size(); i++){ tmp1.push_back(tmp_tmp1[(tmp_tmp1.size()-1) - i]); } // for (int i = 0; i < tmp_tmp1.size(); i++){ // std::cout<<"tmp_tmp1 "<<tmp_tmp1[i].vec.x<<" "<<tmp_tmp1[i].vec.y<<" "<<tmp_tmp1[i].val<<" "<<tmp_tmp1[i].type<<std::endl; // } } vec1x = (tmp2[0].vec.x - oldNucleusCenter.x); vec2x = (tmp2[1].vec.x - oldNucleusCenter.x); vec1y = (tmp2[0].vec.y - oldNucleusCenter.y); vec2y = (tmp2[1].vec.y - oldNucleusCenter.y); orientation = vec1x*vec2y - vec2x*vec1y; std::cout<<"tmp2 orientation = "<<orientation<<std::endl; if (orientation < 0){ std::cout<<"tmp2 orientation incorrect. Rearranging tmp2."<<std::endl; std::vector<VecValT> tmp_tmp2 = tmp2; for (int i = 0; i < tmp2.size(); i++){ // std::cout<<"tmp2 "<<tmp2[i].vec.x<<" "<<tmp2[i].vec.y<<" "<<tmp2[i].val<<" "<<tmp2[i].type<<std::endl; } tmp2.clear(); for (int i = 0; i < tmp_tmp2.size(); i++){ tmp2.push_back(tmp_tmp2[(tmp_tmp2.size()-1) - i]); } // for (int i = 0; i < tmp_tmp2.size(); i++){ // std::cout<<"tmp_tmp2 "<<tmp_tmp2[i].vec.x<<" "<<tmp_tmp2[i].vec.y<<" "<<tmp_tmp2[i].val<<" "<<tmp_tmp2[i].type<<std::endl; // } } // std::vector<VecValT> sorted_tmp1 = tmp1; // std::vector<VecValT> sorted_tmp2 = tmp2; // std::sort(sorted_tmp1.begin(), sorted_tmp1.end()); // std::sort(sorted_tmp2.begin(), sorted_tmp2.end()); // for (int j = 0; j < sorted_tmp1.size(); j++){ // std::cout<<"sorted_tmp1["<<j<<"].val = "<<sorted_tmp1[j].val<<", vec = "<<sorted_tmp1[j].vec.x<<" "<<sorted_tmp1[j].vec.y<<std::endl; // } // for (int j = 0; j < sorted_tmp2.size(); j++){ // std::cout<<"sorted_tmp2["<<j<<"].val = "<<sorted_tmp2[j].val<<", vec = "<<sorted_tmp2[j].vec.x<<" "<<sorted_tmp2[j].vec.y<<std::endl; // } // for (int j = 0; j < tmp1.size(); j++){ // std::cout<<"tmp1["<<j<<"].val = "<<tmp1[j].val<<", vec = "<<tmp1[j].vec.x<<" "<<tmp1[j].vec.y<<" , type = "<<tmp2[j].type<<std::endl; // } // for (int j = 0; j < tmp2.size(); j++){ // std::cout<<"tmp2["<<j<<"].val = "<<tmp2[j].val<<", vec = "<<tmp2[j].vec.x<<" "<<tmp2[j].vec.y<<" , type = "<<tmp2[j].type<<std::endl; // } // //assert(tmp1.size() < allocPara_m.maxMembrNodePerCell); // //assert(tmp2.size() < allocPara_m.maxMembrNodePerCell); uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size(); uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size(); std::vector<CVector> ptsBetween1, ptsBetween2; uint Num_of_NodeType0_MotherCell = 0, Num_of_NodeType0_DaughterCell = 0, Num_of_NodeType1_DaughterCell = 0, Num_of_NodeType1_MotherCell = 0; if (divAuxData.isMotherCellBehind[i] == true){ // Recall that tmp1 will always be the mother cell data //Kevin for (int j = 0; j < tmp1.size(); j++){ if (tmp1[j].type == lateralB){ Num_of_NodeType0_MotherCell+=1; } } if (Num_of_NodeType0_MotherCell > maxDivMembrNodeCount1){ std::cout<<"Too many new nodes are needed to be introduced for mother cell! Change the max number of mem nodes allowed!"<<std::endl; } for (int j = 0; j < tmp2.size(); j++){ if (tmp2[j].type == lateralA){ Num_of_NodeType1_DaughterCell+=1; } } if (Num_of_NodeType1_DaughterCell > maxDivMembrNodeCount2){ std::cout<<"Too many new nodes are needed to be introducedj for daughter cell! 
Change the max number of mem nodes allowed!"<<std::endl; } // Num_of_NodeType0_MotherCell = ; // Num_of_NodeType1_DaughterCell = ; } else{ for (int j = 0; j < tmp1.size(); j++){ if (tmp1[j].type == lateralA){ Num_of_NodeType1_MotherCell+=1; } } if (Num_of_NodeType1_MotherCell > maxDivMembrNodeCount1){ std::cout<<"Too many new nodes are needed to be introduced for mother cell! Change the max number of mem nodes allowed!"<<std::endl; } for (int j = 0; j < tmp2.size(); j++){ if (tmp2[j].type == lateralB){ Num_of_NodeType0_DaughterCell+=1; } } if (Num_of_NodeType0_DaughterCell > maxDivMembrNodeCount2){ std::cout<<"Too many new nodes are needed to be introduced for daughter cell! Change the max number of mem nodes allowed!"<<std::endl; } } std::cout<<"Num_of_NodeType0_MotherCell = "<<Num_of_NodeType0_MotherCell<<std::endl; std::cout<<"Num_of_NodeType1_MotherCell = "<<Num_of_NodeType1_MotherCell<<std::endl; std::cout<<"Num_of_NodeType0_DaughterCell = "<<Num_of_NodeType0_DaughterCell<<std::endl; std::cout<<"Num_of_NodeType1_DaughterCell = "<<Num_of_NodeType1_DaughterCell<<std::endl; // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. if (tmp1.size() >= 1) { if (divAuxData.isMotherCellBehind[i] == true){ // ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, // Num_of_NodeType0_MotherCell, maxDivMembrNodeCount1); ptsBetween1 = obtainPtsBetween_cellCenterLine(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, cellCenterLine_Basal2Apical, cellCenterLine_Basal2Apical_leftShift, cellCenterLine_MirrorLength_Basal2Apical); } else{ // ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, // Num_of_NodeType1_MotherCell, maxDivMembrNodeCount1); ptsBetween1 = obtainPtsBetween_cellCenterLine(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, cellCenterLine_Apical2Basal, cellCenterLine_Apical2Basal_rightShift, cellCenterLine_MirrorLength_Apical2Basal); } // std::cout<<"tmp1[tmp1.size() - 1] = "<<tmp1[tmp1.size() - 1].vec.x<<" "<<tmp1[tmp1.size()-1].vec.y<<std::endl; // std::cout<<"tmp1[0] = "<<tmp1[0].vec.x<<" "<<tmp1[0].vec.y<<std::endl; // ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, // memNewSpacing, maxDivMembrNodeCount1); // std::cout<<"sorted_tmp1[tmp1.size() - 1].val = "<<sorted_tmp1[sorted_tmp1.size()-1].val<<", .vec = "<< sorted_tmp1[sorted_tmp1.size() - 1].vec.x<<" "<<sorted_tmp1[sorted_tmp1.size()-1].vec.y<<std::endl; // std::cout<<"sorted_tmp1[0].val = "<<sorted_tmp1[0].val<<", .vec = "<<sorted_tmp1[0].vec.x<<" "<<sorted_tmp1[0].vec.y<<std::endl; // ptsBetween1 = obtainPtsBetween(sorted_tmp1[sorted_tmp1.size() - 1].vec, sorted_tmp1[0].vec, // memNewSpacing, maxDivMembrNodeCount1); } // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. 
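// The daughter cell (tmp2) is handled symmetrically below: it receives the
// complementary stretch of the cell center line, traversed in the opposite
// orientation and shifted toward the opposite side, so that the two freshly
// created membrane segments face each other across the cleavage plane.
// obtainPtsBetween_cellCenterLine (implementation not shown here) presumably
// places the new membrane nodes along the stored center line, offset by the
// supplied shift unit vectors and mirror lengths, rather than along a straight
// segment between the two end nodes as the commented-out obtainPtsBetween
// calls did.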
if (tmp2.size() >= 1) { if (divAuxData.isMotherCellBehind[i] == true){ // ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, // Num_of_NodeType1_DaughterCell, maxDivMembrNodeCount2); ptsBetween2 = obtainPtsBetween_cellCenterLine(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, cellCenterLine_Apical2Basal, cellCenterLine_Apical2Basal_rightShift, cellCenterLine_MirrorLength_Apical2Basal); } else{ // ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, // Num_of_NodeType0_DaughterCell, maxDivMembrNodeCount2); ptsBetween2 = obtainPtsBetween_cellCenterLine(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, cellCenterLine_Basal2Apical, cellCenterLine_Basal2Apical_leftShift, cellCenterLine_MirrorLength_Basal2Apical); } // std::cout<<"tmp2[tmp1.size() - 1] = "<<tmp2[tmp2.size() - 1].vec.x<<" "<<tmp2[tmp2.size()-1].vec.y<<std::endl; // std::cout<<"tmp2[0] = "<<tmp2[0].vec.x<<" "<<tmp2[0].vec.y<<std::endl; // ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, // memNewSpacing, maxDivMembrNodeCount2); // std::cout<<"sorted_tmp2[sorted_tmp2.size() - 1].val = "<<sorted_tmp2[sorted_tmp2.size()-1].val<<", .vec = "<<sorted_tmp2[sorted_tmp2.size() - 1].vec.x<<" "<<sorted_tmp2[sorted_tmp2.size()-1].vec.y<<std::endl; // std::cout<<"sorted_tmp2[0].val = "<<sorted_tmp2[0].val<<", vec = "<<sorted_tmp2[0].vec.x<<" "<<sorted_tmp2[0].vec.y<<std::endl; // ptsBetween2 = obtainPtsBetween(sorted_tmp2[sorted_tmp2.size() - 1].vec, sorted_tmp2[0].vec, // memNewSpacing, maxDivMembrNodeCount2); } for (uint j = 0; j < tmp1.size(); j++) { divAuxData.tmp1VecMem.push_back(tmp1[j].vec); divAuxData.tmp1VecMemNodeType.push_back(tmp1[j].type); } for (uint j = 0; j < tmp2.size(); j++) { divAuxData.tmp2VecMem.push_back(tmp2[j].vec); divAuxData.tmp2VecMemNodeType.push_back(tmp2[j].type); } std::cout<<"ptsBetween1 size = "<<ptsBetween1.size()<<std::endl; if (divAuxData.isMotherCellBehind[i] == false){ for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); // std::cout<<"ptsBtween1 "<<ptsBetween1[j].x<<" "<<ptsBetween1[j].y<<std::endl; divAuxData.tmp1VecMemNodeType.push_back(lateralB); } // std::cout<<"size of tmp1VecMemNodeType = "<<divAuxData.tmp1VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp1VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp1VecMem["<<j<<"] = "<<divAuxData.tmp1VecMem[j].x<<" "<<divAuxData.tmp1VecMem[j].y<<" "<<divAuxData.tmp1VecMem[j].z<<", Type = "<<divAuxData.tmp1VecMemNodeType[j]<<std::endl; // } } else if (divAuxData.isMotherCellBehind[i] == true){ for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); // std::cout<<"ptsBtween1 "<<ptsBetween1[j].x<<" "<<ptsBetween1[j].y<<std::endl; divAuxData.tmp1VecMemNodeType.push_back(lateralA); } std::cout<<"size of tmp1VecMemNodeType = "<<divAuxData.tmp1VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp1VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp1VecMem["<<j<<"] = "<<divAuxData.tmp1VecMem[j].x<<" "<<divAuxData.tmp1VecMem[j].y<<" "<<divAuxData.tmp1VecMem[j].z<<", Type = "<<divAuxData.tmp1VecMemNodeType[j]<<std::endl; // } } std::cout<<"ptsBetween2 size = "<<ptsBetween2.size()<<std::endl; if (divAuxData.isMotherCellBehind[i] == false){ for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); // std::cout<<"ptsBtween2 "<<ptsBetween2[j].x<<" "<<ptsBetween2[j].y<<std::endl; divAuxData.tmp2VecMemNodeType.push_back(lateralA); } // std::cout<<"size of 
tmp2VecMemNodeType = "<<divAuxData.tmp2VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp2VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp2VecMem["<<j<<"] = "<<divAuxData.tmp2VecMem[j].x<<" "<<divAuxData.tmp2VecMem[j].y<<" "<<divAuxData.tmp2VecMem[j].z<<", Type = "<<divAuxData.tmp2VecMemNodeType[j]<<std::endl; // } } else if (divAuxData.isMotherCellBehind[i] == true){ for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); // std::cout<<"ptsBtween2 "<<ptsBetween2[j].x<<" "<<ptsBetween2[j].y<<std::endl; divAuxData.tmp2VecMemNodeType.push_back(lateralB); } std::cout<<"size of tmp2VecMemNodeType = "<<divAuxData.tmp2VecMemNodeType.size()<<std::endl; // for (uint j = 0; j < divAuxData.tmp2VecMemNodeType.size();j++){ // std::cout<<"divAuxData.tmp2VecMem["<<j<<"] = "<<divAuxData.tmp2VecMem[j].x<<" "<<divAuxData.tmp2VecMem[j].y<<" "<<divAuxData.tmp2VecMem[j].z<<", Type = "<<divAuxData.tmp2VecMemNodeType[j]<<std::endl; // } } //Here we will try to adjust the positions of newly added nodes (from ptsBetween1 and ptsBetween2) according to the 'center line' of the cell. assert(divAuxData.tmp1VecMem.size() <= membThreshold); assert(divAuxData.tmp2VecMem.size() <= membThreshold); } void SceCells::obtainMembrAndIntnlNodes(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes) { membrNodes.clear(); intnlNodes.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; double posY = divAuxData.tmpNodePosY_M[index]; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } } //Ali void SceCells::obtainMembrAndIntnlNodesPlusNodeType(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes, vector<MembraneType1> & nodeTypeIndxDiv) { membrNodes.clear(); intnlNodes.clear(); nodeTypeIndxDiv.clear() ; uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; double posY = divAuxData.tmpNodePosY_M[index]; MembraneType1 nodeTypeI = divAuxData.tmpNodeType[index]; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); nodeTypeIndxDiv.push_back(nodeTypeI) ; } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } } void SceCells::obtainMembrAndIntnlNodesPlusNodeType2(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes, vector<MembraneType1> & nodeTypeIndxDiv, vector<CVector>& cellCenterLine_Basal2Apical, vector<CVector>& cellCenterLine_Apical2Basal, vector<CVector>& cellCenterLine_Basal2Apical_leftShift, vector<CVector>& cellCenterLine_Basal2Apical_rightShift, vector<CVector>& cellCenterLine_Apical2Basal_leftShift, vector<CVector>& cellCenterLine_Apical2Basal_rightShift, vector<double>& cellCenterLine_MirrorLength_Basal2Apical, vector<double>& cellCenterLine_MirrorLength_Apical2Basal) { membrNodes.clear(); intnlNodes.clear(); nodeTypeIndxDiv.clear() ; cellCenterLine_Basal2Apical.clear(); 
cellCenterLine_Apical2Basal.clear(); cellCenterLine_Apical2Basal_leftShift.clear(); cellCenterLine_Apical2Basal_rightShift.clear(); cellCenterLine_Basal2Apical_leftShift.clear(); cellCenterLine_Basal2Apical_rightShift.clear(); cellCenterLine_MirrorLength_Basal2Apical.clear(); cellCenterLine_MirrorLength_Apical2Basal.clear(); vector<CVector> tmpCellCenterLine_Basal2Apical; vector<CVector> tmpCellCenterLine_Basal2Apical_leftShift; vector<CVector> tmpCellCenterLine_Basal2Apical_rightShift; vector<double> tmpCellCenterLine_MirrorLength_Basal2Apical; vector<CVector> tmpCellCenterLine_Apical2Basal; vector<CVector> tmpCellCenterLine_Apical2Basal_leftShift; vector<CVector> tmpCellCenterLine_Apical2Basal_rightShift; vector<double> tmpCellCenterLine_MirrorLength_Apical2Basal; tmpCellCenterLine_Basal2Apical.clear(); tmpCellCenterLine_Basal2Apical_leftShift.clear(); tmpCellCenterLine_Basal2Apical_rightShift.clear(); tmpCellCenterLine_MirrorLength_Basal2Apical.clear(); tmpCellCenterLine_Apical2Basal.clear(); tmpCellCenterLine_Apical2Basal_leftShift.clear(); tmpCellCenterLine_Apical2Basal_rightShift.clear(); tmpCellCenterLine_MirrorLength_Apical2Basal.clear(); bool firstloop_Basal2Apical = true; bool firstloop_Apical2Basal = true; bool lateralA_earlyShift = false; bool lateralB_earlyShift = false; uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index, nextIndex; int lateralACount = 0; int lateralBCount = 0; int initLateralACount = 0; int initLateralBCount = 0; std::cout<<"i = "<<i<<std::endl; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpNodeType[index]==lateralA){ initLateralACount+=1; } if (divAuxData.tmpNodeType[index]==lateralB){ initLateralBCount+=1; } } std::cout<<"initLateralACount = "<<initLateralACount<<std::endl; std::cout<<"initLateralBCount = "<<initLateralBCount<<std::endl; for (uint j = 0; j < maxAllNodePerCell; j++) { // std::cout<<"j = "<<j<<std::endl; index = i * maxAllNodePerCell + j; nextIndex = index + 1; if (nextIndex >= i*maxAllNodePerCell+maxAllNodePerCell){ nextIndex = i*maxAllNodePerCell + 0; } if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; // std::cout<<"posX = "<<posX<<std::endl; double posY = divAuxData.tmpNodePosY_M[index]; // std::cout<<"posY = "<<posY<<std::endl; MembraneType1 nodeTypeI = divAuxData.tmpNodeType[index]; // std::cout<<"nodetype = "<<nodeTypeI<<std::endl; // std::cout<<"nodeTypeI = "<<nodeTypeI<<std::endl; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); nodeTypeIndxDiv.push_back(nodeTypeI) ; CVector cellCenterLinePos; CVector cellCenterLinePos_leftUnitDir; CVector cellCenterLinePos_rightUnitDir; //Since we start by using nodes with label 'lateralA', the resulting cellCenterLinePos shoould orient from basal to apical. //But since mother and daughter cell will go through this center line in different orientation (even though both cells use //counterclockwise orientation). 
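// For a lateralA node p with mirror node q (looked up through
// divAuxData.tmpNodeMemMirrorIndex_M, presumably the adhesion partner on the
// opposite lateral membrane), the block below computes:
//   cellCenterLinePos              = (p + q) / 2
//   length                         = |p - q|
//   cellCenterLinePos_rightUnitDir = (p - cellCenterLinePos) / |p - cellCenterLinePos|
//   cellCenterLinePos_leftUnitDir  = (q - cellCenterLinePos) / |q - cellCenterLinePos|
// The lateralB branch further down swaps the "left" and "right" labels, since
// that side of the membrane is traversed in the opposite sense.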
//Kevin if (nodeTypeI == lateralA){ // int mirrorNode = nodes->getInfoVecs().nodeAdhereIndex[j]; // std::cout<<"divAuxData.tmpNodeMemMirrorIndex_M size = "<<divAuxData.tmpNodeMemMirrorIndex_M.size()<<std::endl; cellCenterLinePos.x = (divAuxData.tmpNodePosX_M[index] + nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; cellCenterLinePos.y = (divAuxData.tmpNodePosY_M[index] + nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; // std::cout<<"cellCenterLinePos = "<<cellCenterLinePos.x<<" "<<cellCenterLinePos.y<<std::endl; double length = sqrt((divAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]])* (divAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]]) + (divAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])* (divAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])); cellCenterLinePos_rightUnitDir.x = divAuxData.tmpNodePosX_M[index] - cellCenterLinePos.x; // std::cout<<"tmpNodePosX = "<<divAuxData.tmpNodePosX_M[index]<<" , cellCenterLinePos.x = "<<cellCenterLinePos.x<<std::endl; cellCenterLinePos_rightUnitDir.y = divAuxData.tmpNodePosY_M[index] - cellCenterLinePos.y; // std::cout<<"tmpNodePosY = "<<divAuxData.tmpNodePosY_M[index]<<" , cellCenterLinePos.y = "<<cellCenterLinePos.y<<std::endl; double rightLength = sqrt(cellCenterLinePos_rightUnitDir.x*cellCenterLinePos_rightUnitDir.x + cellCenterLinePos_rightUnitDir.y*cellCenterLinePos_rightUnitDir.y); // std::cout<<"rightLength = "<<rightLength<<std::endl; cellCenterLinePos_rightUnitDir.x = cellCenterLinePos_rightUnitDir.x/rightLength; cellCenterLinePos_rightUnitDir.y = cellCenterLinePos_rightUnitDir.y/rightLength; cellCenterLinePos_leftUnitDir.x = nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]] - cellCenterLinePos.x; // std::cout<<"nodeLocX[tmpNodeMirrorIndex] = "<<nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]]<<" , cellCenterLinePos.x = "<<cellCenterLinePos.x<<std::endl; cellCenterLinePos_leftUnitDir.y = nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]] - cellCenterLinePos.y; // std::cout<<"nodeLocY[tmpNodeMirrorIndex] = "<<nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]]<<" , cellCenterLinePos.y = "<<cellCenterLinePos.y<<std::endl; double leftLength = sqrt(cellCenterLinePos_leftUnitDir.x*cellCenterLinePos_leftUnitDir.x + cellCenterLinePos_leftUnitDir.y*cellCenterLinePos_leftUnitDir.y); // std::cout<<"leftLength = "<<leftLength<<std::endl; cellCenterLinePos_leftUnitDir.x = cellCenterLinePos_leftUnitDir.x/leftLength; cellCenterLinePos_leftUnitDir.y = cellCenterLinePos_leftUnitDir.y/leftLength; cellCenterLine_Basal2Apical.push_back(cellCenterLinePos); lateralACount+=1; cellCenterLine_Basal2Apical_leftShift.push_back(cellCenterLinePos_leftUnitDir); // std::cout<<"cellCenterLine Basal2Apical left shift: "<<cellCenterLinePos_leftUnitDir.x<<" "<<cellCenterLinePos_leftUnitDir.y<<std::endl; cellCenterLine_Basal2Apical_rightShift.push_back(cellCenterLinePos_rightUnitDir); // std::cout<<"cellCenterLine Basal2Apical right shift: "<<cellCenterLinePos_rightUnitDir.x<<" "<<cellCenterLinePos_rightUnitDir.y<<std::endl; cellCenterLine_MirrorLength_Basal2Apical.push_back(length); if (firstloop_Basal2Apical == true){ tmpCellCenterLine_Basal2Apical.push_back(cellCenterLinePos); 
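// The tmpCellCenterLine_Basal2Apical buffers keep a copy of the center-line
// points gathered before the first lateralA -> non-lateralA transition. If
// that transition is reached before every lateralA node has been visited
// (lateralACount != initLateralACount), the walk evidently started in the
// middle of the lateralA stretch; the points collected so far are then cleared
// from the main vectors and the buffered copies are appended again after the
// loop (see the lateralA_earlyShift handling below), which restores a
// contiguous basal-to-apical ordering.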
tmpCellCenterLine_Basal2Apical_leftShift.push_back(cellCenterLinePos_leftUnitDir); tmpCellCenterLine_Basal2Apical_rightShift.push_back(cellCenterLinePos_rightUnitDir); tmpCellCenterLine_MirrorLength_Basal2Apical.push_back(length); } if (nodeTypeI == lateralA && divAuxData.tmpNodeType[nextIndex] != lateralA){ // std::cout<<"Node type shift away from lateralA."<<std::endl; //This means that we hit a transition point from lateralA to apical1. Need to make sure if we have covered all lateralA nodes or not. //If not, this means that we started counting from the middle of lateralA nodes, which will causes serious problem later. //Hence we will try to remedy this issue here //Kevin if (lateralACount != initLateralACount){ lateralA_earlyShift = true; // std::cout<<"Node type shift away from lateralA before covering all lateralA type node."<<std::endl; cellCenterLine_Basal2Apical.clear(); cellCenterLine_Basal2Apical_leftShift.clear(); cellCenterLine_Basal2Apical_rightShift.clear(); cellCenterLine_MirrorLength_Basal2Apical.clear(); } firstloop_Basal2Apical = false; // std::cout<<"firstloop_Basal2Apical = "<<firstloop_Basal2Apical<<std::endl; } } if (nodeTypeI == lateralB){ // int mirrorNode = nodes->getInfoVecs().nodeAdhereIndex[j]; cellCenterLinePos.x = (divAuxData.tmpNodePosX_M[index] + nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; cellCenterLinePos.y = (divAuxData.tmpNodePosY_M[index] + nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; double length = sqrt((divAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]])* (divAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]]) + (divAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])* (divAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]])); cellCenterLinePos_leftUnitDir.x = divAuxData.tmpNodePosX_M[index] - cellCenterLinePos.x; cellCenterLinePos_leftUnitDir.y = divAuxData.tmpNodePosY_M[index] - cellCenterLinePos.y; double leftLength = sqrt(cellCenterLinePos_leftUnitDir.x*cellCenterLinePos_leftUnitDir.x + cellCenterLinePos_leftUnitDir.y*cellCenterLinePos_leftUnitDir.y); cellCenterLinePos_leftUnitDir.x = cellCenterLinePos_leftUnitDir.x/leftLength; cellCenterLinePos_leftUnitDir.y = cellCenterLinePos_leftUnitDir.y/leftLength; cellCenterLinePos_rightUnitDir.x = nodes->getInfoVecs().nodeLocX[divAuxData.tmpNodeMemMirrorIndex_M[index]] - cellCenterLinePos.x; cellCenterLinePos_rightUnitDir.y = nodes->getInfoVecs().nodeLocY[divAuxData.tmpNodeMemMirrorIndex_M[index]] - cellCenterLinePos.y; double rightLength = sqrt(cellCenterLinePos_rightUnitDir.x*cellCenterLinePos_rightUnitDir.x + cellCenterLinePos_rightUnitDir.y*cellCenterLinePos_rightUnitDir.y); cellCenterLinePos_rightUnitDir.x = cellCenterLinePos_rightUnitDir.x/rightLength; cellCenterLinePos_rightUnitDir.y = cellCenterLinePos_rightUnitDir.y/rightLength; cellCenterLine_Apical2Basal.push_back(cellCenterLinePos); lateralBCount+=1; cellCenterLine_Apical2Basal_leftShift.push_back(cellCenterLinePos_leftUnitDir); // std::cout<<"cellCenterLine Apical2Basal left shift: "<<cellCenterLinePos_leftUnitDir.x<<" "<<cellCenterLinePos_leftUnitDir.y<<std::endl; cellCenterLine_Apical2Basal_rightShift.push_back(cellCenterLinePos_rightUnitDir); // std::cout<<"cellCenterLine Apical2Basal right shift: "<<cellCenterLinePos_rightUnitDir.x<<" 
"<<cellCenterLinePos_rightUnitDir.y<<std::endl; cellCenterLine_MirrorLength_Apical2Basal.push_back(length); if (firstloop_Apical2Basal == true){ tmpCellCenterLine_Apical2Basal.push_back(cellCenterLinePos); tmpCellCenterLine_Apical2Basal_leftShift.push_back(cellCenterLinePos_leftUnitDir); tmpCellCenterLine_Apical2Basal_rightShift.push_back(cellCenterLinePos_rightUnitDir); tmpCellCenterLine_MirrorLength_Apical2Basal.push_back(length); } if (nodeTypeI == lateralB && divAuxData.tmpNodeType[nextIndex] != lateralB){ // std::cout<<"Node type shift away from lateralB."<<std::endl; //This means that we hit a transition point from lateralA to apical1. Need to make sure if we have covered all lateralA nodes or not. //If not, this means that we started counting from the middle of lateralA nodes, which will causes serious problem later. //Hence we will try to remedy this issue here //Kevin if (lateralBCount != initLateralBCount){ lateralB_earlyShift = true; // std::cout<<"Node type shift away from lateralB before covering all lateralB type node."<<std::endl; cellCenterLine_Apical2Basal.clear(); cellCenterLine_Apical2Basal_leftShift.clear(); cellCenterLine_Apical2Basal_rightShift.clear(); cellCenterLine_MirrorLength_Apical2Basal.clear(); } firstloop_Apical2Basal = false; // std::cout<<"firstloop_Apical2Basal = "<<firstloop_Apical2Basal<<std::endl; } } } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } if (lateralA_earlyShift == true){ for (int p = 0; p < tmpCellCenterLine_Basal2Apical.size(); p++){ cellCenterLine_Basal2Apical.push_back(tmpCellCenterLine_Basal2Apical[p]); cellCenterLine_Basal2Apical_leftShift.push_back(tmpCellCenterLine_Basal2Apical_leftShift[p]); cellCenterLine_Basal2Apical_rightShift.push_back(tmpCellCenterLine_Basal2Apical_rightShift[p]); cellCenterLine_MirrorLength_Basal2Apical.push_back(tmpCellCenterLine_MirrorLength_Basal2Apical[p]); } } // std::cout<<"cellCenterLine_Basal2Apical size = "<<cellCenterLine_Basal2Apical.size()<<std::endl; // std::cout<<"cellCenterLine_Basal2Apical_leftShift size = "<<cellCenterLine_Basal2Apical_leftShift.size()<<std::endl; // std::cout<<"cellCenterLine_Basal2Apical_rightShift size = "<<cellCenterLine_Basal2Apical_rightShift.size()<<std::endl; // std::cout<<"cellCenterLine_MirrorLength_Basal2Apical size = "<<cellCenterLine_MirrorLength_Basal2Apical.size()<<std::endl; if (lateralB_earlyShift == true){ for (int p = 0; p < tmpCellCenterLine_Apical2Basal.size(); p++){ cellCenterLine_Apical2Basal.push_back(tmpCellCenterLine_Apical2Basal[p]); cellCenterLine_Apical2Basal_leftShift.push_back(tmpCellCenterLine_Apical2Basal_leftShift[p]); cellCenterLine_Apical2Basal_rightShift.push_back(tmpCellCenterLine_Apical2Basal_rightShift[p]); cellCenterLine_MirrorLength_Apical2Basal.push_back(tmpCellCenterLine_MirrorLength_Apical2Basal[p]); } } // std::cout<<"cellCenterLine_Apical2Basal size = "<<cellCenterLine_Apical2Basal.size()<<std::endl; // std::cout<<"cellCenterLine_Apical2Basal_leftShift size = "<<cellCenterLine_Apical2Basal_leftShift.size()<<std::endl; // std::cout<<"cellCenterLine_Apical2Basal_rightShift size = "<<cellCenterLine_Apical2Basal_rightShift.size()<<std::endl; // std::cout<<"cellCenterLine_MirrorLength_Apical2Basal size = "<<cellCenterLine_MirrorLength_Apical2Basal.size()<<std::endl; //Now we check to see if the cellCenterLine data structure is built correctly. The two should be identical except with opposite ordering. 
} void SceCells::obtainMembrAndIntnlNodesPlusNodeType2_printingOnly(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes, vector<MembraneType1> & nodeTypeIndxDiv, vector<CVector>& cellCenterLine_Basal2Apical, vector<CVector>& cellCenterLine_Apical2Basal) { membrNodes.clear(); intnlNodes.clear(); nodeTypeIndxDiv.clear() ; cellCenterLine_Basal2Apical.clear(); cellCenterLine_Apical2Basal.clear(); vector<CVector> tmpCellCenterLine_Basal2Apical; vector<CVector> tmpCellCenterLine_Apical2Basal; tmpCellCenterLine_Basal2Apical.clear(); tmpCellCenterLine_Apical2Basal.clear(); bool firstloop_Basal2Apical = true; bool firstloop_Apical2Basal = true; bool lateralA_earlyShift = false; bool lateralB_earlyShift = false; uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index, nextIndex; int lateralACount = 0; int lateralBCount = 0; int initLateralACount = 0; int initLateralBCount = 0; std::cout<<"i = "<<i<<std::endl; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (printAuxData.tmpNodeType[index]==lateralA){ initLateralACount+=1; } if (printAuxData.tmpNodeType[index]==lateralB){ initLateralBCount+=1; } } std::cout<<"initLateralACount_printOnly = "<<initLateralACount<<std::endl; std::cout<<"initLateralBCount_printOnly = "<<initLateralBCount<<std::endl; for (uint j = 0; j < maxAllNodePerCell; j++) { // std::cout<<"j = "<<j<<std::endl; index = i * maxAllNodePerCell + j; nextIndex = index + 1; if (nextIndex >= i*maxAllNodePerCell+maxAllNodePerCell){ nextIndex = i*maxAllNodePerCell + 0; } if (printAuxData.tmpIsActive_M[index] != true) { continue; } // std::cout<<"debug 1"<<std::endl; double posX = printAuxData.tmpNodePosX_M[index]; double posY = printAuxData.tmpNodePosY_M[index]; MembraneType1 nodeTypeI = printAuxData.tmpNodeType[index]; // std::cout<<"debug 2"<<std::endl; if (j < membThreshold) { CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); nodeTypeIndxDiv.push_back(nodeTypeI) ; CVector cellCenterLinePos; CVector cellCenterLinePos_leftUnitDir; CVector cellCenterLinePos_rightUnitDir; if (nodeTypeI == lateralA){ cellCenterLinePos.x = (printAuxData.tmpNodePosX_M[index] + nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; cellCenterLinePos.y = (printAuxData.tmpNodePosY_M[index] + nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; double length = sqrt((printAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]])* (printAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]]) + (printAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])* (printAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])); cellCenterLine_Basal2Apical.push_back(cellCenterLinePos); lateralACount+=1; if (firstloop_Basal2Apical == true){ tmpCellCenterLine_Basal2Apical.push_back(cellCenterLinePos); } if (nodeTypeI == lateralA && printAuxData.tmpNodeType[nextIndex] != lateralA){ if (lateralACount != initLateralACount){ lateralA_earlyShift = true; cellCenterLine_Basal2Apical.clear(); } firstloop_Basal2Apical = false; } } if (nodeTypeI == lateralB){ cellCenterLinePos.x = (printAuxData.tmpNodePosX_M[index] + nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; cellCenterLinePos.y = (printAuxData.tmpNodePosY_M[index] + 
nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])/2.0; double length = sqrt((printAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]])* (printAuxData.tmpNodePosX_M[index] - nodes->getInfoVecs().nodeLocX[printAuxData.tmpNodeMemMirrorIndex_M[index]]) + (printAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])* (printAuxData.tmpNodePosY_M[index] - nodes->getInfoVecs().nodeLocY[printAuxData.tmpNodeMemMirrorIndex_M[index]])); cellCenterLine_Apical2Basal.push_back(cellCenterLinePos); lateralBCount+=1; if (firstloop_Apical2Basal == true){ tmpCellCenterLine_Apical2Basal.push_back(cellCenterLinePos); } if (nodeTypeI == lateralB && printAuxData.tmpNodeType[nextIndex] != lateralB){ if (lateralBCount != initLateralBCount){ lateralB_earlyShift = true; cellCenterLine_Apical2Basal.clear(); } firstloop_Apical2Basal = false; } } } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } // std::cout<<"debug 3"<<std::endl; } if (lateralA_earlyShift == true){ for (int p = 0; p < tmpCellCenterLine_Basal2Apical.size(); p++){ cellCenterLine_Basal2Apical.push_back(tmpCellCenterLine_Basal2Apical[p]); } } if (lateralB_earlyShift == true){ for (int p = 0; p < tmpCellCenterLine_Apical2Basal.size(); p++){ cellCenterLine_Apical2Basal.push_back(tmpCellCenterLine_Apical2Basal[p]); } } } /* Ali CVector SceCells::obtainCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } */ CVector SceCells::obtainCellCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } CVector SceCells::obtainNucleusCenter(uint i, vector<CVector> IntnlNodes){ for (int j = 0; j < IntnlNodes.size(); j++){ divAuxData.tmpNucleusCenterPosX_M[i] += IntnlNodes[j].x; divAuxData.tmpNucleusCenterPosY_M[i] += IntnlNodes[j].y; } double oldCenterX = divAuxData.tmpNucleusCenterPosX_M[i]/IntnlNodes.size(); double oldCenterY = divAuxData.tmpNucleusCenterPosY_M[i]/IntnlNodes.size(); CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } //Kevin CVector SceCells::obtainIntCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } /* CVector SceCells::calDivDir_MajorAxis(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis) { // not the optimal algorithm but easy to code double maxDiff = 0; CVector majorAxisDir; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; if (diff > maxDiff) { maxDiff = diff; majorAxisDir = tmpUnitDir; } } lenAlongMajorAxis = maxDiff; return majorAxisDir; } */ CVector SceCells::calDivDir_MajorAxis(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis) { // not the optimal algorithm but easy to code double minDiff = 10000; CVector minorAxisDir; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = 
membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; if (diff < minDiff) { minDiff = diff; minorAxisDir = tmpUnitDir; } } lenAlongMajorAxis = minDiff; return minorAxisDir; } CVector SceCells::calDivDir_ApicalBasal(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis, vector<MembraneType1> & nodeTypeIndxDiv) { // not the optimal algorithm but easy to code double minDiff = 10000; CVector minorAxisDir; int minPointAdhIndex ; int maxPointAdhIndex; //for (uint i = 0; i < membrNodes.size(); i++) { // cout <<"adhesion index for dividing cell node"<<i<<"is" << adhIndxDiv[i] <<endl; // } //return 0 ; for (uint i = 0; i < membrNodes.size(); i++) { if ( (nodeTypeIndxDiv[i]!=lateralA) && (nodeTypeIndxDiv[i]!=lateralB) ) { continue ; } CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; //distance finder for node i to the opposite nodes //Ali for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; // minimum distance finder for each cells to be used for cell center shifting. It should also need to be a node that have neighbor if (diff < minDiff ) { minDiff = diff; minorAxisDir = tmpUnitDir; // adhesionIndexFinal=adhIndxDiv[i]; } } lenAlongMajorAxis = minDiff; return minorAxisDir; } std::pair <int ,int> SceCells::calApicalBasalRingIds(CVector divDir, CVector center,vector<CVector>& membrNodes, vector<MembraneType1> & nodeTypeIndxDiv) { int idMin, idMax ; CVector splitDir = divDir.rotateNintyDeg_XY_CC(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; CVector tmpUnitDir2 = tmpDir2.getUnitVector(); double tmpVecProduct = splitDir * tmpUnitDir2; if (tmpVecProduct < min) { min = tmpVecProduct; idMin=j ; } if (tmpVecProduct > max) { max = tmpVecProduct; idMax=j ; } } cout << " contractile node location is " << membrNodes[idMin].x << " ," << membrNodes[idMin].y << endl ; cout << " contractile node location is " << membrNodes[idMax].x << " ," << membrNodes[idMax].y << endl ; if (nodeTypeIndxDiv[idMin]==apical1) { return make_pair(idMin,idMax) ; } else { return make_pair(idMax,idMin) ; } } //A&A double SceCells::calLengthAlongHertwigAxis(CVector divDir, CVector cellCenter, vector<CVector>& membrNodes) { CVector divDirUnit = divDir.getUnitVector(); double minUnit = 0, maxUnit = 0; double minOveral = 0, maxOveral = 0; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - cellCenter; //it is cell center CVector tmpUnitDir = tmpDir.getUnitVector(); double tmpVecProductUnit = divDirUnit * tmpUnitDir; double tmpVecProductOveral = divDirUnit * tmpDir; if (tmpVecProductUnit < minUnit) { minUnit = tmpVecProductUnit; minOveral = tmpVecProductOveral; } if (tmpVecProductUnit > maxUnit) { maxUnit = tmpVecProductUnit; maxOveral = tmpVecProductOveral; } } double lenAlongHertwigAxis = maxOveral - minOveral; return lenAlongHertwigAxis; // it is minor axis } void SceCells::obtainTwoNewIntCenters(CVector& oldIntCenter, CVector& divDir, double len_MajorAxis, CVector& 
intCenterNew1, CVector& intCenterNew2) { CVector divDirUnit = divDir.getUnitVector(); std::cout<<"divDirUnit = "<<divDirUnit.x<<" "<<divDirUnit.y<<std::endl; double lenChange = len_MajorAxis / 2.0 * centerShiftRatio; // this means small axis intCenterNew1 = oldIntCenter + lenChange * divDirUnit; // it should be nucleus center intCenterNew2 = oldIntCenter - lenChange * divDirUnit; // it should be nulceus center CVector centerTissue ; //Ali centerTissue=CVector (40.0, 40.0, 0.0) ; //Ali should be imported CVector tmpVec1=intCenterNew1-centerTissue ; //Ali // assuming New1 is mother cell CVector tmpVec2=intCenterNew2-centerTissue ; //Ali CVector tmpDiff=tmpVec2-tmpVec1 ; CVector tmpCross=Cross(tmpVec1,tmpVec2) ; //Ali bool isMotherCellBehindInt=false ; //Ali // assuming CCW is the initial order of cell ranks //if (tmpCross.z>0){ // if (tmpDiff.x>0){ isMotherCellBehindInt=true ; // } //Ali std::cout<<"isMotherCellBehindInt = "<<isMotherCellBehindInt<<std::endl; divAuxData.isMotherCellBehind.push_back(isMotherCellBehindInt) ; } void SceCells::prepareTmpVec(uint i, CVector divDir, CVector oldCellCenter,CVector oldIntCenter ,std::vector<VecValT>& tmp1, std::vector<VecValT>& tmp2) { tmp1.clear(); // is for membrane node of first cell tmp2.clear(); // is for membrane node of the second cell uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; VecValT tmpData; CVector splitDir = divDir.rotateNintyDeg_XY_CC(); std::cout<<"splitDir = "<<splitDir.x<<" "<<splitDir.y<<std::endl; std::vector<double> oldCellCenter2BasalVec(2,0.0); std::vector<double> oldCellCenter2ApicalVec(2,0.0); oldCellCenter2BasalVec[0] = divAuxData.tmpBasalLoc[0] - oldCellCenter.x; oldCellCenter2BasalVec[1] = divAuxData.tmpBasalLoc[1] - oldCellCenter.y; oldCellCenter2ApicalVec[0] = divAuxData.tmpApicalLoc[0] - oldCellCenter.x; oldCellCenter2ApicalVec[1] = divAuxData.tmpApicalLoc[1] - oldCellCenter.y; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (j < membThreshold) { // means node type is membrane if (divAuxData.tmpIsActive_M[index] == true) { CVector memPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = memPos - oldCellCenter; CVector centerToPosUnit = centerToPosDir.getUnitVector(); double dotProduct = centerToPosUnit * splitDir; tmpData.val = dotProduct; tmpData.vec = memPos; tmpData.type=divAuxData.tmpNodeType[index] ; if (divAuxData.tmpNodeType[index] == lateralA){ tmp2.push_back(tmpData); } else if (divAuxData.tmpNodeType[index] == lateralB){ tmp1.push_back(tmpData); } else if (divAuxData.tmpNodeType[index] == apical1){ double crossProduct_withApical = centerToPosDir.x*oldCellCenter2ApicalVec[1] - centerToPosDir.y*oldCellCenter2ApicalVec[0]; // double crossProduct_withBasal = centerToPosDir.x*oldCellCenter2BasalVec[1] - centerToPosDir.y*oldCellCenter2BasalVec[0]; if (crossProduct_withApical >= 0){ tmp2.push_back(tmpData); } else if (crossProduct_withApical < 0){ tmp1.push_back(tmpData); } else {std::cout<<"None of the condition to determine if an apical node belongs to daughter cell or mother cell is met! 
Something is wrong!"<<std::endl;} } else if (divAuxData.tmpNodeType[index] == basal1){ // double crossProduct_withApical = centerToPosDir.x*oldCellCenter2ApicalVec[1] - centerToPosDir.y*oldCellCenter2ApicalVec[0] ; double crossProduct_withBasal = centerToPosDir.x*oldCellCenter2BasalVec[1] - centerToPosDir.y*oldCellCenter2BasalVec[0]; if (crossProduct_withBasal < 0){ tmp2.push_back(tmpData); } else if (crossProduct_withBasal >= 0){ tmp1.push_back(tmpData); } else {std::cout<<"None of the condition to determine if a basal node belongs to daughter cell or mother cell is met! Something is wrong!"<<std::endl;} } else{ std::cout<<"Active membrane node not assigned with any node type is present! Something is wrong!"<<std::endl; } /*CVector memPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = memPos - oldCellCenter; // Ali it should be center of cells CVector centerToPosUnit = centerToPosDir.getUnitVector(); CVector crossProduct = Cross(centerToPosDir, splitDir); double dotProduct = centerToPosUnit * splitDir;//This is the original way to determine which node belongs to which node belongs to mother and daughter cell // double dotProduct = centerToPosUnit.getUnitVector() * splitDir.getUnitVector(); // double dotProduct2 = centerToPosDir * divDir; tmpData.val = dotProduct; // for sorting the membrane nodes tmpData.vec = memPos; tmpData.type=divAuxData.tmpNodeType[index] ; if (crossProduct.z >= 0) { // counter-cloce wise tmp1.push_back(tmpData); } else { // cloce wise tmp2.push_back(tmpData); }*/ //This is the original way to determine which node belongs to which node belongs to mother and daughter cell //Kevin // if (dotProduct2 >= 0){ // tmp1.push_back(tmpData); // } // else{ // tmp2.push_back(tmpData); // } } } else {// shrink the internal nodes around the internal node center if (divAuxData.tmpIsActive_M[index] == true) { CVector internalPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = internalPos - oldIntCenter; // center of nucleus is more biological CVector shrinkedPos = centerToPosDir * shrinkRatio + oldIntCenter; // CVector unitDivDir = divDir.getUnitVector(); // Ali // double AmpTanget=centerToPosDir*unitDivDir ; // Ali dot product of two vectors // double shrinkedAmpTanget=shrinkRatio*AmpTanget; // multiply two doubles //Ali // CVector TangetVShrink=unitDivDir*shrinkedAmpTanget; // shrink the tanget component //Ali // CVector TangetV= unitDivDir* AmpTanget; // get the tanget component to compute the normal vector //Ali // CVector NormV=centerToPosDir-TangetV ; // compute the normal vector Ali // CVector polarShrinkedPos=NormV+TangetVShrink ; // summation of shrinked tanget and as previous vector in the normal direction to division axis//Ali // CVector updatedV=polarShrinkedPos+oldCenter ; //Ali double dotProduct = centerToPosDir * divDir; //double dotProduct = polarShrinkedPos * divDir; //Ali if (dotProduct >= 0) { if (divAuxData.tmp1IntnlVec.size() >= 24){ divAuxData.tmp2IntnlVec.push_back(shrinkedPos); } else{ divAuxData.tmp1IntnlVec.push_back(shrinkedPos); } } else { if (divAuxData.tmp2IntnlVec.size() >= 24){ divAuxData.tmp1IntnlVec.push_back(shrinkedPos); } else{ divAuxData.tmp2IntnlVec.push_back(shrinkedPos); } } } } } for (int i = 0; i < tmp1.size(); i++){ // std::cout<<"tmp1 ["<<i<<"] "<<tmp1[i].vec.x<<" "<<tmp1[i].vec.y<<" "<<tmp1[i].val<<" "<<tmp1[i].type<< std::endl; } for (int i = 0; i < tmp2.size(); i++){ // std::cout<<"tmp2 ["<<i<<"] "<<tmp2[i].vec.x<<" "<<tmp2[i].vec.y<<" 
"<<tmp2[i].val<<" "<<tmp2[i].type<< std::endl; } int targetId_tmp2; for (int i = 0; i < tmp2.size(); i++){ int iNext = i+1; if (i == (tmp2.size()-1)){ iNext = 0; } // std::cout<<"tmp2 i = "<<i<<std::endl; // std::cout<<"temp2 iNext = "<<iNext<<std::endl; double dotProduct = ((tmp2[i].vec.x - oldCellCenter.x) * (tmp2[iNext].vec.x - oldCellCenter.x)) + (tmp2[i].vec.y - oldCellCenter.y) * (tmp2[iNext].vec.y - oldCellCenter.y) ; if (dotProduct < 0){ targetId_tmp2 = i; std::cout<<"i for dotProduct < 0, tmp2 = "<<i<<std::endl; } } // std::cout<<"tmp2 before rearranging"<<std::endl; // for (int i = 0; i < tmp2.size(); i++){ // std::cout<<tmp2[i].vec.x<<" "<<tmp2[i].vec.y<<std::endl; // } if (targetId_tmp2 != (tmp2.size()-1)){ vector<VecValT> tmp_tmp2; int currentId = targetId_tmp2+1; for (int i = 0; i<tmp2.size(); i++){ tmp_tmp2.push_back(tmp2[currentId]); currentId += 1; if (currentId >= tmp2.size()){ currentId = currentId - tmp2.size(); } } tmp2.clear(); for (int i = 0; i < tmp_tmp2.size(); i++){ tmp2.push_back(tmp_tmp2[i]); } } else{ std::cout<<"No need to reorganize the order of tmp2Membr before adding new nodes"<<std::endl; } int targetId_tmp1; for (int i = 0; i < tmp1.size(); i++){ int iNext = i+1; if (i == (tmp1.size()-1)){ iNext = 0; } double dotProduct = (tmp1[i].vec.x - oldCellCenter.x) * (tmp1[iNext].vec.x - oldCellCenter.x) + (tmp1[i].vec.y - oldCellCenter.y) * (tmp1[iNext].vec.y - oldCellCenter.y) ; if (dotProduct < 0){ targetId_tmp1 = i; std::cout<<"i for dotProduct < 0, tmp1 = "<<i<<std::endl; } } if (targetId_tmp1 != (tmp1.size()-1)){ vector<VecValT> tmp_tmp1; int currentId = targetId_tmp1+1; for (int i = 0; i<tmp1.size(); i++){ tmp_tmp1.push_back(tmp1[currentId]); currentId += 1; if (currentId >= tmp1.size()){ currentId = currentId - tmp1.size(); } } tmp1.clear(); for (int i = 0; i < tmp_tmp1.size(); i++){ tmp1.push_back(tmp_tmp1[i]); } } else{ std::cout<<"No need to reorganize the order of tmp1Membr before adding new nodes"<<std::endl; } // oldCellCenter2BasalVec[0] = divAuxData.tmpBasalLoc[0] - oldCellCenter.x; // oldCellCenter2BasalVec[1] = divAuxData.tmpBasalLoc[1] - oldCellCenter.y; // double dotProduct_withBasal = -10000.0; // int targetId_tmp2 = -100; // for (int i = 0; i < tmp2.size(); i++){ // CVector tmp2_vec = tmp2[i].vec - oldCellCenter; // double tmp2_dotProduct_withBasal = tmp2_vec.x*oldCellCenter2BasalVec[0] + tmp2_vec.y*oldCellCenter2BasalVec[1]; // if (tmp2_dotProduct_withBasal >= dotProduct_withBasal){ // dotProduct_withBasal = tmp2_dotProduct_withBasal; // targetId_tmp2 = i; // } // } // vector<VecValT> tmp_tmp2; // int currentId = targetId_tmp2; // for (int i = 0; i<tmp2.size(); i++){ // tmp_tmp2.push_back(tmp2[currentId]); // currentId += 1; // if (currentId >= tmp2.size()){ // currentId = currentId - tmp2.size(); // } // } // tmp2.clear(); // for (int i = 0; i < tmp_tmp2.size(); i++){ // tmp2.push_back(tmp_tmp2[i]); // } } void SceCells::calCellArea() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, 
DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalTriArea(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellAreaVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } //AAMIRI added to calculate Perimeter of each cell void SceCells::calCellPerim() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalPerim(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellPerimVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } //Ali added to calculate pressure of each cell void SceCells::calCellPressure() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxNPerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0) ; // if the membrane node is not active or if this is an internal node it will automatically add with zero. 
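// Per cell, the reductions and transforms below assemble
//   cellPressure = ( sum(nodeF_MI_M_N) + sum(lagrangeFN) ) / cellPerimVec
// where the sums run over the cell's nodes (inactive and internal nodes
// contribute zero, as noted above), nodeF_MI_M_N and lagrangeFN are presumably
// the normal components of the membrane-interaction and Lagrangian forces, and
// cellPerimVec is the perimeter computed in calCellPerim(). Both sums are
// formed with reduce_by_key keyed by cell rank.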
thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))+ totalNodeCountForActiveCells, nodes->getInfoVecs().nodeF_MI_M_N.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.sumF_MI_M_N.begin()); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))+ totalNodeCountForActiveCells, nodes->getInfoVecs().lagrangeFN.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.sumLagrangeFN.begin()); thrust:: transform(cellInfoVecs.sumF_MI_M_N.begin(), cellInfoVecs.sumF_MI_M_N.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.sumLagrangeFN.begin(), cellInfoVecs.cellPressure.begin(),thrust::plus<float>()) ; thrust:: transform(cellInfoVecs.cellPressure.begin(), cellInfoVecs.cellPressure.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.cellPerimVec.begin(), cellInfoVecs.cellPressure.begin(),thrust::divides<float>()) ; } CellsStatsData SceCells::outputPolyCountData() { cout << " I am at begining of outpolycount"<< std::flush ; std::cout.flush(); double sumX,sumY,cntr_X_Domain,cntr_Y_Domain ; int BdryApproach ; BdryApproach=1 ; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; cout << " I am before cells area"<< endl ; calCellArea(); for (int k = 0; k < cellInfoVecs.cellAreaVec.size(); k++){ if (cellInfoVecs.cellAreaVec[k] < 0){ cellInfoVecs.cellAreaVec[k] = -1.0*cellInfoVecs.cellAreaVec[k]; } } // !!!!!NOTE!!!!!! this is currently an ad hoc way to make sure area of each triangle is positive. cout << " I am after cells area" << endl ; calCellPerim();//AAMIRI calCellPressure() ; // Ali //computeBasalLoc(); //Ali we call it here to compute the length of the cells CellsStatsData result; cout << " I am after result" << endl ; uint bdryCriteria = globalConfigVars.getConfigValue("BdryCellCriteria").toInt(); // already on host; no need to call thrust::copy // thrust::host_vector<int> adhIndxHost = // nodes->getInfoVecs().nodeAdhIndxHostCopy; thrust::host_vector<int> adhIndxHost = nodes->getInfoVecs().nodeAdhereIndex; thrust::host_vector<double> growthProVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, growthProVecHost.begin()); thrust::host_vector<double> growthProMembrVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin() + allocPara_m.currentActiveCellCount, growthProMembrVecHost.begin()); thrust::host_vector<uint> activeMembrNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeMembrNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeMembrNodeCountHost.begin()); thrust::host_vector<uint> activeIntnlNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeIntnlNodeCountHost.begin()); ////////////// thrust::host_vector<double> centerCoordXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> centerCoordYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount, centerCoordXHost.begin()); 
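// The same device-to-host staging pattern repeats below for the remaining
// cell-center coordinate, the apical/basal locations, the averaged
// internal-node positions, and the per-cell area, perimeter and pressure:
// each cellInfoVecs device vector is copied into a host_vector of length
// currentActiveCellCount before the per-cell statistics are assembled on the
// host.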
thrust::copy(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount, centerCoordYHost.begin()); ///////////// // std::cout<<"ISOLATE 1"<<std::endl; /////////////// thrust::host_vector<double> apicalLocXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> apicalLocYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.apicalLocX.begin(), cellInfoVecs.apicalLocX.begin() + allocPara_m.currentActiveCellCount, apicalLocXHost.begin()); thrust::copy(cellInfoVecs.apicalLocY.begin(), cellInfoVecs.apicalLocY.begin() + allocPara_m.currentActiveCellCount, apicalLocYHost.begin()); /////////// // std::cout<<"ISOLATE 2"<<std::endl; /////////////// thrust::host_vector<double> basalLocXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> basalLocYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.basalLocX.begin(), cellInfoVecs.basalLocX.begin() + allocPara_m.currentActiveCellCount, basalLocXHost.begin()); thrust::copy(cellInfoVecs.basalLocY.begin(), cellInfoVecs.basalLocY.begin() + allocPara_m.currentActiveCellCount, basalLocYHost.begin()); /////////// // std::cout<<"ISOLATE 3"<<std::endl; ////// thrust::host_vector<double> InternalAvgXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalAvgYHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalMaxXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalMaxYHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalMinXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> InternalMinYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.InternalAvgX.begin(), cellInfoVecs.InternalAvgX.begin() + allocPara_m.currentActiveCellCount, InternalAvgXHost.begin()); thrust::copy(cellInfoVecs.InternalAvgY.begin(), cellInfoVecs.InternalAvgY.begin() + allocPara_m.currentActiveCellCount, InternalAvgYHost.begin()); ///// // std::cout<<"ISOLATE 4"<<std::endl; thrust::host_vector<double> cellAreaHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> cellPerimHost( allocPara_m.currentActiveCellCount);//AAMIRI thrust::host_vector<double> cellPressureHost( allocPara_m.currentActiveCellCount);//Ali thrust::copy(cellInfoVecs.cellAreaVec.begin(), cellInfoVecs.cellAreaVec.begin() + allocPara_m.currentActiveCellCount, cellAreaHost.begin()); thrust::copy(cellInfoVecs.cellPerimVec.begin(), cellInfoVecs.cellPerimVec.begin() + allocPara_m.currentActiveCellCount, cellPerimHost.begin());//AAMIRI thrust::copy(cellInfoVecs.cellPressure.begin(), cellInfoVecs.cellPressure.begin() + allocPara_m.currentActiveCellCount, cellPressureHost.begin());//Ali // std::cout<<"ISOLATE 4"<<std::endl; //LOOKS LIKE AN ERROR POP UP IN THIS SECTION.............. 
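// The loop below walks the active membrane nodes of each cell and uses each
// node's adhesion index to (a) collect the set of neighboring cell ranks,
// together with a per-neighbor contact count in neighborStrength (at most 10
// distinct neighbors are tracked), and (b) flag the cell as a boundary cell
// whenever more than bdryCriteria consecutive membrane nodes have no adhesion
// partner; the run of non-adhering nodes is allowed to wrap around from the
// last active membrane node back to the first.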
sumX=0 ; sumY=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { CellStats cellStatsData; cellStatsData.cellGrowthProgress = growthProVecHost[i]; cellStatsData.cellRank = i; bool isBdry = false; std::set<int> neighbors; std::vector<int> neighborsV; //Ali int neighborStrength[10]; //Ali int continousNoAdh = 0; map <int, int> cellAndNeighborRank ; //Ali // std::cout << "printing adhesion indicies "; //for (int ii=0 ; ii<neighborStrength.size() ; ii++){ for (int ii=0 ; ii< 10; ii++){ //Ali neighborStrength[ii]=0 ; } cellAndNeighborRank.clear(); //Ali // std::cout << "printing adhesion indicies 2"; for (uint j = 0; j < activeMembrNodeCountHost[i]; j++) { uint index = i * allocPara_m.maxAllNodePerCell + j; //std::cout << adhIndxHost[index] << ","; if (adhIndxHost[index] != -1) { uint adhCellRank = adhIndxHost[index] / allocPara_m.maxAllNodePerCell; //std::cout << adhCellRank << " "; neighbors.insert(adhCellRank); map <int, int>:: iterator iteratorMap=cellAndNeighborRank.find(adhCellRank); //Ali if (iteratorMap==cellAndNeighborRank.end()) { //Ali int NewneighborRank= neighbors.size()-1; //Ali cellAndNeighborRank[adhCellRank]=NewneighborRank; //Ali neighborStrength[NewneighborRank]=neighborStrength[NewneighborRank]+1 ; //Ali neighborsV.push_back(adhCellRank); //Ali } else { //Ali int oldNeighborRank=(*iteratorMap).second ; neighborStrength[oldNeighborRank]=neighborStrength[oldNeighborRank]+1 ; //Ali } continousNoAdh = 0; // std::cout << "printing adhesion indicies 3"; } else { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } // std::cout << "printing adhesion indicies 4"; } if (j == activeMembrNodeCountHost[i] - 1 && adhIndxHost[index] == -1) { int k = 0; uint indexNew; while (k < activeMembrNodeCountHost[i] - 1) { indexNew = i * allocPara_m.maxAllNodePerCell + k; if (adhIndxHost[indexNew] == -1) { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } k++; } else { break; } } // std::cout << "printing adhesion indicies 5"; } } // std::cout<<"ISOLATE 4.5"<<std::endl; cellStatsData.isBdryCell = isBdry; cellStatsData.numNeighbors = neighbors.size(); cellStatsData.currentActiveMembrNodes = activeMembrNodeCountHost[i]; cellStatsData.currentActiveIntnlNodes = activeIntnlNodeCountHost[i]; cellStatsData.neighborVec = neighbors; cellStatsData.neighborVecV = neighborsV; //Ali for (int iiii=0; iiii<10 ; iiii++){ cellStatsData.cellNeighborStrength[iiii] = neighborStrength[iiii]; } //Ali cellStatsData.membrGrowthProgress = growthProMembrVecHost[i]; cellStatsData.cellCenter = CVector(centerCoordXHost[i], centerCoordYHost[i], 0); cellStatsData.cellApicalLoc= CVector(apicalLocXHost[i], apicalLocYHost[i], 0); //Ali cellStatsData.cellBasalLoc= CVector(basalLocXHost[i], basalLocYHost[i], 0); //Ali cellStatsData.cellNucleusLoc = CVector(InternalAvgXHost[i], InternalAvgYHost[i], 0); // Ali cellStatsData.cellNucleusLocMax = CVector(InternalMaxXHost[i], InternalMaxYHost[i], 0); cellStatsData.cellNucleusLocMin = CVector(InternalMinXHost[i], InternalMinYHost[i], 0); cellStatsData.cellArea = cellAreaHost[i]; cellStatsData.cellPerim = cellPerimHost[i];//AAMIRI cellStatsData.cellPressure = cellPressureHost[i];//Ali result.cellsStats.push_back(cellStatsData); sumX=sumX+cellStatsData.cellCenter.x ; sumY=sumY+cellStatsData.cellCenter.y ; } // std::cout<<"ISOLATE 5"<<std::endl; //Ali if (BdryApproach==2) { cout << "sumX=" << sumX << endl ; cout << "sumY=" << sumY << endl ; cntr_X_Domain=sumX/result.cellsStats.size() ; 
cntr_Y_Domain=sumY/result.cellsStats.size() ; cout << "cntr_X=" << cntr_X_Domain << endl ; cout << "cntr_Y=" << cntr_Y_Domain << endl ; double R_Max ; double Distance ; R_Max=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > R_Max) { R_Max=Distance ; } } cout << "R_Max=" << R_Max << endl ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > 0.9* R_Max) { result.cellsStats[i].isBdryCell = true; cout << "isBdryCell"<< i<< endl ; } else { result.cellsStats[i].isBdryCell = false; cout << "isNormalCell"<< i << endl ; } } } // std::cout<<"ISOLATE 6"<<std::endl; return result; } SingleCellData SceCells::OutputStressStrain() { SingleCellData result ; vector <double> nodeExtForceXHost; vector <double> nodeExtForceYHost; nodeExtForceXHost.resize(totalNodeCountForActiveCells); nodeExtForceYHost.resize(totalNodeCountForActiveCells); thrust::copy ( nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceX.begin()+ totalNodeCountForActiveCells, nodeExtForceXHost.begin()); thrust::copy ( nodes->getInfoVecs().nodeExtForceY.begin(), nodes->getInfoVecs().nodeExtForceY.begin()+ totalNodeCountForActiveCells, nodeExtForceYHost.begin()); // There is a compiling issue with using count_if on GPU. int numPositiveForces = count_if(nodeExtForceXHost.begin(),nodeExtForceXHost.end(),isGreaterZero() ) ; double totalExtPositiveForce =accumulate(nodeExtForceXHost.begin(),nodeExtForceXHost.end(),0.0, SumGreaterZero() ) ; cout << "number of positive external forces are=" <<numPositiveForces<<endl ; cout << "Total external forces are=" <<totalExtPositiveForce<<endl ; //thrust::device_vector<double>::iterator double MinX=*thrust::min_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; //thrust::device_vector<double>::iterator double MaxX=*thrust::max_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; result.Cells_Extrem_Loc[0]=MinX; result.Cells_Extrem_Loc[1]=MaxX; result.F_Ext_Out=totalExtPositiveForce ; return result ; } __device__ bool bigEnough(double& num) { if (num > minDivisor) { return true; } else { return false; } } __device__ double cross_Z(double vecA_X, double vecA_Y, double vecB_X, double vecB_Y) { return vecA_X * vecB_Y - vecA_Y * vecB_X; } /* __device__ double calBendMulti(double& angle, uint activeMembrCt) { double equAngle = PI - PI / activeMembrCt; return bendCoeff * (angle - equAngle); } */ //AAMIRI __device__ double calBendMulti_Mitotic(double& angle, uint activeMembrCt, double& progress, double mitoticCri) { //double equAngle = PI - PI / activeMembrCt; double equAngle = PI ; // - PI / activeMembrCt; if (progress <= mitoticCri){ return bendCoeff * (angle - equAngle);} else{ return (angle - equAngle)*(bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri)); } } __device__ double CalMembrBendSpringEnergy(double& angle, uint activeMembrCt, double& progress, double mitoticCri) { double equAngle = PI - PI / activeMembrCt; if (progress <= mitoticCri){ return ( 0.5*bendCoeff * (angle - equAngle)*(angle - equAngle) ); } else{ return ( 0.5*bendCoeff * (angle - 
equAngle)*(angle - equAngle)* (bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri)) ); } } void SceCells::applySceCellDisc_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); bool* isEnteringMitotic = thrust::raw_pointer_cast( &(cellInfoVecs.isEnteringMitotic[0])); //double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //Ali added for cell pressure calculation nodes->getInfoVecs().nodeF_MI_M_y.begin(),// ALi added for cell pressure calculation nodes->getInfoVecs().nodeIIEnergy.begin(), nodes->getInfoVecs().nodeIMEnergy.begin())), AddSceCellForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M, isEnteringMitotic)); /* for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++) { cout << "for cell rank "<<i<< " cell apical location is " << cellInfoVecs.apicalLocX[i] <<" , " <<cellInfoVecs.apicalLocY[i] << endl ; cout << "for cell rank "<<i<< " cell nucleus distance from apical is " << cellInfoVecs.nucDesireDistApical[i] << endl ; } */ } // void SceCells::applyMembContraction() { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; // uint maxMemNodePerCell = 
allocPara_m.maxMembrNodePerCell; // thrust::counting_iterator<uint> iBegin2(0); // double* nodeLocXAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocX[0])); // double* nodeLocYAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocY[0])); // MembraneType1* nodeTypeAddr=thrust::raw_pointer_cast( // &(nodes->getInfoVecs().memNodeType1[0])); // int* nodeMemMirrorIndexAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeMemMirrorIndex[0])); // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple( // thrust::make_permutation_iterator( // cellInfoVecs.nucDesireDistApical.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.activeMembrNodeCounts.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocX.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocY.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell)), // make_transform_iterator(iBegin2, // ModuloFunctor(maxAllNodePerCell)), // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().memNodeType1.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // thrust::make_permutation_iterator( // cellInfoVecs.nucDesireDistApical.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.activeMembrNodeCounts.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocX.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocY.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell)), // make_transform_iterator(iBegin2, // ModuloFunctor(maxAllNodePerCell)), // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().memNodeType1.begin())) // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().nodeF_MM_C_X.begin(), // nodes->getInfoVecs().nodeF_MM_C_Y.begin(), // nodes->getInfoVecs().nodeContractEnergyT.begin(), // nodes->getInfoVecs().basalContractPair.begin())), // AddMemContractForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, nodeTypeAddr,nodeMemMirrorIndexAddr)); // } // void SceCells::applyMembContraction(double timeRatio, double timeRatio_Crit_actomyo, double timeRatio_Crit_Division, double distFromNucleus_max, double distFromNucleus_min, double percentage_before_timeRatio_Crit_Division_scaling) { // totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount // * allocPara_m.maxAllNodePerCell; // uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; // uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; // thrust::counting_iterator<uint> iBegin2(0); // double* nodeLocXAddr = thrust::raw_pointer_cast( // 
&(nodes->getInfoVecs().nodeLocX[0])); // double* nodeLocYAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocY[0])); // // double* nodeLocZAddr = thrust::raw_pointer_cast( // // &(nodes->getInfoVecs().nodeLocZ[0])); // /*double* ActomyosinMultipBasal = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeActomyosinMultip_basal[0])); // double* ActomyosinMultipApical = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeActomyosinMultip_apical[0]));*/ // MembraneType1* nodeTypeAddr=thrust::raw_pointer_cast( // &(nodes->getInfoVecs().memNodeType1[0])); // int* nodeMemMirrorIndexAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeMemMirrorIndex[0])); // double* contractActomyo_multip = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().contractActomyo_multip[0])); // double* contractActomyo_multip_apical = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().contractActomyo_multip_apical[0])); // bool* isEnteringMitotic = thrust::raw_pointer_cast( // &(cellInfoVecs.isEnteringMitotic[0])); // double* contractileSpringGrowthProgress = thrust::raw_pointer_cast( // &(cellInfoVecs.contractileSpringGrowthProgress[0])); // double* distFromNucleus_normal = thrust::raw_pointer_cast( // &(cellInfoVecs.distFromNucleus_normal[0])); // double* distFromNucleus_normal_apical = thrust::raw_pointer_cast( // &(cellInfoVecs.distFromNucleus_normal_apical[0])); // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple( // thrust::make_permutation_iterator( // cellInfoVecs.nucDesireDistApical.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.activeMembrNodeCounts.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocX.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocY.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell)), // make_transform_iterator(iBegin2, // ModuloFunctor(maxAllNodePerCell)), // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().memNodeType1.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // thrust::make_permutation_iterator( // cellInfoVecs.nucDesireDistApical.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.activeMembrNodeCounts.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocX.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // thrust::make_permutation_iterator( // cellInfoVecs.apicalLocY.begin(), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell))), // make_transform_iterator(iBegin2, // DivideFunctor(maxAllNodePerCell)), // make_transform_iterator(iBegin2, // ModuloFunctor(maxAllNodePerCell)), // nodes->getInfoVecs().nodeIsActive.begin(), // nodes->getInfoVecs().nodeVelX.begin(), // nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().memNodeType1.begin())) // + totalNodeCountForActiveCells, // thrust::make_zip_iterator( // thrust::make_tuple( // nodes->getInfoVecs().nodeVelX.begin(), // 
nodes->getInfoVecs().nodeVelY.begin(), // nodes->getInfoVecs().nodeF_MM_C_X.begin(), // nodes->getInfoVecs().nodeF_MM_C_Y.begin(), // nodes->getInfoVecs().nodeContractEnergyT.begin(), // nodes->getInfoVecs().basalContractPair.begin())), // // AddMemContractForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, ActomyosinMultipBasal,ActomyosinMultipApical, nodeTypeAddr,nodeMemMirrorIndexAddr, // // timeRatio, timeRatio_Crit_actomyo, timeRatio_Crit_Division)); // AddMemContractForce_tmp(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, nodeTypeAddr,nodeMemMirrorIndexAddr, // timeRatio, contractActomyo_multip, contractActomyo_multip_apical, distFromNucleus_max, distFromNucleus_min, distFromNucleus_normal, distFromNucleus_normal_apical, percentage_before_timeRatio_Crit_Division_scaling, isEnteringMitotic, contractileSpringGrowthProgress)); // } void SceCells::applyMembContraction2(double timeRatio, double timeRatio_Crit_actomyo, double timeRatio_Crit_Division, double distFromNucleus_max, double distFromNucleus_min, double mitoRndActomyoStrengthScaling) { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin2(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); // double* nodeLocZAddr = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().nodeLocZ[0])); /*double* ActomyosinMultipBasal = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeActomyosinMultip_basal[0])); double* ActomyosinMultipApical = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeActomyosinMultip_apical[0]));*/ MembraneType1* nodeTypeAddr=thrust::raw_pointer_cast( &(nodes->getInfoVecs().memNodeType1[0])); int* nodeMemMirrorIndexAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeMemMirrorIndex[0])); // double* contractActomyo_multip = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().contractActomyo_multip[0])); // double* contractActomyo_multip_apical = thrust::raw_pointer_cast( // &(nodes->getInfoVecs().contractActomyo_multip_apical[0])); double* contractActomyo_multip = thrust::raw_pointer_cast( &(cellInfoVecs.contractActomyo_multip_perCell[0])); double* contractActomyo_multip_apical = thrust::raw_pointer_cast( &(cellInfoVecs.contractActomyo_multip_apical_perCell[0])); bool* isEnteringMitotic = thrust::raw_pointer_cast( &(cellInfoVecs.isEnteringMitotic[0])); double* contractileSpringGrowthProgress = thrust::raw_pointer_cast( &(cellInfoVecs.contractileSpringGrowthProgress[0])); double* distFromNucleus_normal = thrust::raw_pointer_cast( &(cellInfoVecs.distFromNucleus_normal[0])); double* distFromNucleus_normal_apical = thrust::raw_pointer_cast( &(cellInfoVecs.distFromNucleus_normal_apical[0])); double* individualCellHeight = thrust::raw_pointer_cast( &(cellInfoVecs.individualCellHeight[0])); double* distFromBasalLoc = thrust::raw_pointer_cast( &(cellInfoVecs.distFromBasalLoc[0])); double* distFromApicalLoc = thrust::raw_pointer_cast( &(cellInfoVecs.distFromApicalLoc[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.nucDesireDistApical.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), 
make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.apicalLocX.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.apicalLocY.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin2, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().memNodeType1.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.nucDesireDistApical.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.apicalLocX.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.apicalLocY.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin2, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().memNodeType1.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeF_MM_C_X.begin(), nodes->getInfoVecs().nodeF_MM_C_Y.begin(), nodes->getInfoVecs().nodeContractEnergyT.begin(), nodes->getInfoVecs().basalContractPair.begin())), // AddMemContractForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, ActomyosinMultipBasal,ActomyosinMultipApical, nodeTypeAddr,nodeMemMirrorIndexAddr, // timeRatio, timeRatio_Crit_actomyo, timeRatio_Crit_Division)); AddMemContractForce_tmp2(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr,nodeLocYAddr, nodeTypeAddr,nodeMemMirrorIndexAddr, timeRatio, contractActomyo_multip, contractActomyo_multip_apical, distFromNucleus_max, distFromNucleus_min, distFromNucleus_normal, distFromNucleus_normal_apical, mitoRndActomyoStrengthScaling,//percentage_before_timeRatio_Crit_Division_scaling, isEnteringMitotic, contractileSpringGrowthProgress, individualCellHeight, distFromBasalLoc, distFromApicalLoc)); } // this function is not currently active. it was useful when nucleus is modeled as one point node and then force interaction between nucleus and other nodes was implemented in this function. The force interaction is one-way so only effect of nucleus on other nodes. 
void SceCells::applyForceInteractionNucleusAsPoint() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); //double grthPrgrCriVal_M = growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.nucleusLocX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.nucleusLocY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.nucleusLocX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.nucleusLocY.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), AddNucleusForce(grthPrgrCriVal_M)); } void SceCells::PlotNucleus (int & lastPrintNucleus, int & outputFrameNucleus) { lastPrintNucleus=lastPrintNucleus+1 ; if (lastPrintNucleus>=10000) { outputFrameNucleus++ ; lastPrintNucleus=0 ; std::string vtkFileName = "Nucleus_" + patch::to_string(outputFrameNucleus-1) + ".vtk"; ofstream NucleusOut; NucleusOut.open(vtkFileName.c_str()); NucleusOut<< "# vtk DataFile Version 3.0" << endl; NucleusOut<< "Result for paraview 2d code" << endl; NucleusOut << "ASCII" << endl; NucleusOut << "DATASET UNSTRUCTURED_GRID" << std::endl; NucleusOut << "POINTS " << allocPara_m.currentActiveCellCount << " float" << std::endl; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { NucleusOut << cellInfoVecs.InternalAvgX[i] << " " << cellInfoVecs.InternalAvgY[i] << " " << 0.0 << std::endl; } NucleusOut<< std::endl; NucleusOut.close(); } } // __device__ // void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2, // double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { // double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); // double forceValue = 0; // // if (growPro > grthPrgrCriEnd_M) { // if (1 < 0){ // if (linkLength < sceIBDiv_M[4]) { // forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] // * exp(-linkLength / sceIBDiv_M[2]) // + sceIBDiv_M[1] / sceIBDiv_M[3] // * exp(-linkLength / sceIBDiv_M[3]); // } // } else if (2< 0){//(growPro > grthPrgrCriVal_M) { // double percent = (growPro - grthPrgrCriVal_M) // / (grthPrgrCriEnd_M - grthPrgrCriVal_M); // double lenLimit = percent * 
(sceIBDiv_M[4]) // + (1.0 - percent) * sceIB_M[4]; // if (linkLength < lenLimit) { // double intnlBPara0 = percent * (sceIBDiv_M[0]) // + (1.0 - percent) * sceIB_M[0]; // double intnlBPara1 = percent * (sceIBDiv_M[1]) // + (1.0 - percent) * sceIB_M[1]; // double intnlBPara2 = percent * (sceIBDiv_M[2]) // + (1.0 - percent) * sceIB_M[2]; // double intnlBPara3 = percent * (sceIBDiv_M[3]) // + (1.0 - percent) * sceIB_M[3]; // forceValue = -intnlBPara0 / intnlBPara2 // * exp(-linkLength / intnlBPara2) // + intnlBPara1 / intnlBPara3 // * exp(-linkLength / intnlBPara3); // } // } else { // if (linkLength < sceIB_M[4]) { // forceValue = -sceIB_M[0] / sceIB_M[2] // * exp(-linkLength / sceIB_M[2]) // + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); // } // } // xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; // yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; // } __device__ void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M, bool enteringMitotic) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; // if (growPro > grthPrgrCriEnd_M) { if (1 < 0){ if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (2< 0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (enteringMitotic == true){ if (linkLength < 1.25*sceIB_M[4]) { forceValue = (-sceIB_M[0] / sceIB_M[2] * exp(-linkLength / (1.25*sceIB_M[2])) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / (1.25*sceIB_M[3]))); } } else{ if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void CalAndAddIMEnergy(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& IMEnergyT, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double IMEnergy = 0; if (1<0){//(growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { IMEnergy = sceIBDiv_M[0] * exp(-linkLength / sceIBDiv_M[2]) -sceIBDiv_M[1] * exp(-linkLength / sceIBDiv_M[3]); } } else if (2<0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - 
percent) * sceIB_M[3]; IMEnergy = intnlBPara0 * exp(-linkLength / intnlBPara2) - intnlBPara1 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { IMEnergy = sceIB_M[0] * exp(-linkLength / sceIB_M[2]) - sceIB_M[1] * exp(-linkLength / sceIB_M[3]); } } IMEnergyT=IMEnergyT+IMEnergy ; } //Ali function added for eventually computing pressure for each cells // __device__ // void calAndAddIB_M2(double& xPos, double& yPos, double& xPos2, double& yPos2, // double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M) { // double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); // double forceValue = 0; // if (1<0){//if (growPro > grthPrgrCriEnd_M) { // if (linkLength < sceIBDiv_M[4]) { // forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] // * exp(-linkLength / sceIBDiv_M[2]) // + sceIBDiv_M[1] / sceIBDiv_M[3] // * exp(-linkLength / sceIBDiv_M[3]); // } // } else if (2<0){//(growPro > grthPrgrCriVal_M) { // double percent = (growPro - grthPrgrCriVal_M) // / (grthPrgrCriEnd_M - grthPrgrCriVal_M); // double lenLimit = percent * (sceIBDiv_M[4]) // + (1.0 - percent) * sceIB_M[4]; // if (linkLength < lenLimit) { // double intnlBPara0 = percent * (sceIBDiv_M[0]) // + (1.0 - percent) * sceIB_M[0]; // double intnlBPara1 = percent * (sceIBDiv_M[1]) // + (1.0 - percent) * sceIB_M[1]; // double intnlBPara2 = percent * (sceIBDiv_M[2]) // + (1.0 - percent) * sceIB_M[2]; // double intnlBPara3 = percent * (sceIBDiv_M[3]) // + (1.0 - percent) * sceIB_M[3]; // forceValue = -intnlBPara0 / intnlBPara2 // * exp(-linkLength / intnlBPara2) // + intnlBPara1 / intnlBPara3 // * exp(-linkLength / intnlBPara3); // } // } else { // if (linkLength < sceIB_M[4]) { // forceValue = -sceIB_M[0] / sceIB_M[2] // * exp(-linkLength / sceIB_M[2]) // + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); // } // } // F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength; // F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - yPos) / linkLength; // xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; // yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; // } __device__ void calAndAddIB_M2(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M, bool enteringMitotic) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (1<0){//if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (2<0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (enteringMitotic == true){ if (linkLength < 1.25*sceIB_M[4]) { forceValue = (-sceIB_M[0] / sceIB_M[2] * exp(-linkLength / (1.25*sceIB_M[2])) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / 
(1.25*sceIB_M[3]))); } } else{ if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } } F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength; F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void calAndAddMM_ContractRepl(double& xPos, double& yPos, double& xPos2, double& yPos2, double& xRes, double& yRes, double & F_MM_C_X, double & F_MM_C_Y) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; double sceMM_C[5] ; for (int i=0 ; i<5 ; i++) { sceMM_C[i]=sceIIDiv_M[i] ; } if (linkLength < sceMM_C[4]) { forceValue = -sceMM_C[0] / sceMM_C[2] * exp(-linkLength / sceMM_C[2]) + sceMM_C[1] / sceMM_C[3] * exp(-linkLength / sceMM_C[3]); } F_MM_C_X=F_MM_C_X+forceValue * (xPos2 - xPos) / linkLength; F_MM_C_Y=F_MM_C_Y+forceValue * (yPos2 - yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } // __device__ // void calAndAddMM_ContractAdh(double& xPos, double& yPos, double& xPos2, double& yPos2, // double& xRes, double& yRes, double & F_MM_C_X, double & F_MM_C_Y) { // double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); // double lZero=0.03125 ; // //double kCAdh=30 ; // double forceValue = 0; // if (linkLength > lZero) { // forceValue =kContractMemb*(linkLength-lZero) ; // } // F_MM_C_X=F_MM_C_X+forceValue * (xPos2 - xPos) / linkLength; // F_MM_C_Y=F_MM_C_Y+forceValue * (yPos2 - yPos) / linkLength; // xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; // yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; // } __device__ void calAndAddMM_ContractAdh(double& xPos, double& yPos, double& xPos2, double& yPos2, double& xRes, double& yRes, double & F_MM_C_X, double & F_MM_C_Y, double& kContrMemb_multip, double& kContrMemb_multip2) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double lZero=0.03125 ; //double kCAdh=30 ; double forceValue = 0; double scaling = (kContrMemb_multip + kContrMemb_multip2)/2.0; if (linkLength > lZero) { forceValue =scaling*kContractMemb*(linkLength-lZero) ; } F_MM_C_X=F_MM_C_X+forceValue * (xPos2 - xPos) / linkLength; F_MM_C_Y=F_MM_C_Y+forceValue * (yPos2 - yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void calAndAddII_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (1<0){//(growPro > grthPrgrCriEnd_M) { if (linkLength < sceIIDiv_M[4]) { forceValue = -sceIIDiv_M[0] / sceIIDiv_M[2] * exp(-linkLength / sceIIDiv_M[2]) + sceIIDiv_M[1] / sceIIDiv_M[3] * exp(-linkLength / sceIIDiv_M[3]); } } else if (2<0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIIDiv_M[4]) + (1.0 - percent) * sceII_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceIIDiv_M[0]) + (1.0 - percent) * sceII_M[0]; double intraPara1 = percent * (sceIIDiv_M[1]) + (1.0 - percent) * sceII_M[1]; double intraPara2 = percent * (sceIIDiv_M[2]) + (1.0 - percent) * sceII_M[2]; double intraPara3 = percent * (sceIIDiv_M[3]) + (1.0 - percent) * sceII_M[3]; forceValue = 
-intraPara0 / intraPara2 * exp(-linkLength / intraPara2) + intraPara1 / intraPara3 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceII_M[4]) { forceValue = -sceII_M[0] / sceII_M[2] * exp(-linkLength / sceII_M[2]) + sceII_M[1] / sceII_M[3] * exp(-linkLength / sceII_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void CalAndAddIIEnergy(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& IIEnergyT, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double IIEnergy = 0; if (1<0){//(growPro > grthPrgrCriEnd_M) { if (linkLength < sceIIDiv_M[4]) { IIEnergy= sceIIDiv_M[0]* exp(-linkLength / sceIIDiv_M[2]) - sceIIDiv_M[1]* exp(-linkLength / sceIIDiv_M[3]); } } else if (2<0){//(growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIIDiv_M[4]) + (1.0 - percent) * sceII_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceIIDiv_M[0]) + (1.0 - percent) * sceII_M[0]; double intraPara1 = percent * (sceIIDiv_M[1]) + (1.0 - percent) * sceII_M[1]; double intraPara2 = percent * (sceIIDiv_M[2]) + (1.0 - percent) * sceII_M[2]; double intraPara3 = percent * (sceIIDiv_M[3]) + (1.0 - percent) * sceII_M[3]; IIEnergy = intraPara0 * exp(-linkLength / intraPara2) -intraPara1 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceII_M[4]) { IIEnergy = sceII_M[0] * exp(-linkLength / sceII_M[2]) -sceII_M[1] * exp(-linkLength / sceII_M[3]); } } IIEnergyT=IIEnergyT+IIEnergy ; } __device__ void calAndAddNucleusEffect(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceNDiv_M[4]) { forceValue = -sceNDiv_M[0] / sceNDiv_M[2] * exp(-linkLength / sceNDiv_M[2]) + sceNDiv_M[1] / sceNDiv_M[3] * exp(-linkLength / sceNDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceNDiv_M[4]) + (1.0 - percent) * sceN_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceNDiv_M[0]) + (1.0 - percent) * sceN_M[0]; double intraPara1 = percent * (sceNDiv_M[1]) + (1.0 - percent) * sceN_M[1]; double intraPara2 = percent * (sceNDiv_M[2]) + (1.0 - percent) * sceN_M[2]; double intraPara3 = percent * (sceNDiv_M[3]) + (1.0 - percent) * sceN_M[3]; forceValue = -intraPara0 / intraPara2 * exp(-linkLength / intraPara2) + intraPara1 / intraPara3 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceN_M[4]) { forceValue = -sceN_M[0] / sceN_M[2] * exp(-linkLength / sceN_M[2]) + sceN_M[1] / sceN_M[3] * exp(-linkLength / sceN_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } void SceCells::writeNucleusIniLocPercent() { ofstream output ; thrust::host_vector <double> nucleusLocPercentHost ; string uniqueSymbolOutput = globalConfigVars.getConfigValue("UniqueSymbol").toString(); std::string resumeFileName = "./resources/DataFileInitLocNucleusPercent_" + uniqueSymbolOutput + "Resume.cfg"; output.open(resumeFileName.c_str() ); nucleusLocPercentHost=cellInfoVecs.nucleusLocPercent ; for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++){ output << i <<" 
"<<nucleusLocPercentHost[i] << endl ; } output.close() ; } // void SceCells::readNucleusIniLocPercent() { // ifstream input ; // vector <double> nucleusLocPercentHost ; // int dummy ; // double percent ; // string uniqueSymbol = globalConfigVars.getConfigValue("UniqueSymbol").toString(); // string resumeFileName = "./resources/DataFileInitLocNucleusPercent_" + uniqueSymbol + "Resume.cfg"; // input.open(resumeFileName.c_str() ); // if (input.is_open()) { // cout << " Suceessfully openend resume input file for initial locations of nucleus" << endl ; // } // else{ // throw std::invalid_argument ("Failed openening the resume input file for initial locations of nucleus") ; // } // for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++){ // input >> dummy >> percent ; // nucleusLocPercentHost.push_back(percent) ; // } // input.close() ; // cellInfoVecs.nucleusLocPercent= nucleusLocPercentHost ; // } // Original function dealing with nucleus position percentage if there is no division at all. void SceCells::readNucleusIniLocPercent() { ifstream input ; vector <double> nucleusLocPercentHost ; int dummy ; double percent ; string uniqueSymbol = globalConfigVars.getConfigValue("UniqueSymbol").toString(); string resumeFileName = "./resources/DataFileInitLocNucleusPercent_" + uniqueSymbol + "Resume.cfg"; input.open(resumeFileName.c_str() ); if (input.is_open()) { cout << " Suceessfully openend resume input file for initial locations of nucleus" << endl ; } else{ throw std::invalid_argument ("Failed openening the resume input file for initial locations of nucleus") ; } for (int i=0 ; i<allocPara_m.currentActiveCellCount ; i++){ input >> dummy >> percent ; nucleusLocPercentHost.push_back(percent) ; } if (allocPara_m.currentActiveCellCount < allocPara_m.maxCellCount){ std::cout<<"The number of currently active cell is less than the maximally allowed number, we will reserve space for additional cells."<<std::endl; std::cout<<"The nucleus percentage of inactive cells will be set to 0.0 initially, and must be updated when new cells are introduced."<<std::endl; int gap = allocPara_m.maxCellCount - allocPara_m.currentActiveCellCount; for (int gap_count = 0; gap_count < gap; gap_count++){ nucleusLocPercentHost.push_back(0.0); } } input.close() ; cellInfoVecs.nucleusLocPercent= nucleusLocPercentHost ; } void SceCells::allComponentsMoveImplicitPart() { vector <int> indexPrev, indexNext ; #ifdef debugModeECM cudaEvent_t start1, start2, start3, stop; float elapsedTime1, elapsedTime2, elapsedTime3 ; cudaEventCreate(&start1); cudaEventCreate(&start2); cudaEventCreate(&start3); cudaEventCreate(&stop); cudaEventRecord(start1, 0); #endif CalRHS(); #ifdef debugModeECM cudaEventRecord(start2, 0); cudaEventSynchronize(start2); cudaEventElapsedTime(&elapsedTime1, start1, start2); #endif EquMotionCoef(indexPrev, indexNext); #ifdef debugModeECM cudaEventRecord(start3, 0); cudaEventSynchronize(start3); cudaEventElapsedTime(&elapsedTime2, start2, start3); #endif UpdateLocations(indexPrev,indexNext); #ifdef debugModeECM cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime3, start3, stop); std::cout << "time 1 spent in cell-solver module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ; std::cout << "time 2 spent in cell-solver module for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ; std::cout << "time 3 spent in cell-solver module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ; #endif } 
void SceCells::StoreNodeOldPositions() { //nodes->getInfoVecs().locXOldHost.clear(); //nodes->getInfoVecs().locYOldHost.clear(); //nodes->getInfoVecs().locXOldHost.resize(totalNodeCountForActiveCells) ; //nodes->getInfoVecs().locYOldHost.resize(totalNodeCountForActiveCells) ; thrust::copy (nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocX.begin() + totalNodeCountForActiveCells, nodes->getInfoVecs().locXOldHost.begin()); thrust::copy (nodes->getInfoVecs().nodeLocY.begin() , nodes->getInfoVecs().nodeLocY.begin() + totalNodeCountForActiveCells, nodes->getInfoVecs().locYOldHost.begin()); } void SceCells::CalRHS () { // cout << "total node count for active cells in CalRHS function is="<<totalNodeCountForActiveCells << endl ; //nodes->getInfoVecs().rHSXHost.clear() ; //nodes->getInfoVecs().rHSYHost.clear() ; //nodes->getInfoVecs().rHSXHost.resize(totalNodeCountForActiveCells) ; //nodes->getInfoVecs().rHSYHost.resize(totalNodeCountForActiveCells) ; thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin()))+totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().rHSXHost.begin(), nodes->getInfoVecs().rHSYHost.begin()))); } void SceCells::EquMotionCoef(vector<int> & indexPrev, vector<int> & indexNext) { vector <uint> activeMemCount(allocPara_m.currentActiveCellCount) ; double distWithNext[totalNodeCountForActiveCells] ; double distWithPrev[totalNodeCountForActiveCells] ; int cellRank ; int nodeRank ; indexPrev.clear() ; indexNext.clear() ; //nodes->getInfoVecs().hCoefD.clear() ; //nodes->getInfoVecs().hCoefLd.clear() ; //nodes->getInfoVecs().hCoefUd.clear() ; //nodes->getInfoVecs().nodeIsActiveH.clear(); indexPrev.resize(totalNodeCountForActiveCells) ; indexNext.resize(totalNodeCountForActiveCells) ; //nodes->getInfoVecs().hCoefD.resize(totalNodeCountForActiveCells,0.0) ; //nodes->getInfoVecs().hCoefLd.resize(totalNodeCountForActiveCells,0.0) ; //nodes->getInfoVecs().hCoefUd.resize(totalNodeCountForActiveCells,0.0) ; //nodes->getInfoVecs().nodeIsActiveH.resize(totalNodeCountForActiveCells) ; thrust::copy (nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeIsActive.begin()+ totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsActiveH.begin()); thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin() , cellInfoVecs.activeMembrNodeCounts.begin()+ allocPara_m.currentActiveCellCount, activeMemCount.begin()); //cout << "Maximum all node per cells is " << allocPara_m.maxAllNodePerCell << endl ; for ( int i=0 ; i< totalNodeCountForActiveCells ; i++) { cellRank=i/allocPara_m.maxAllNodePerCell ; nodeRank=i%allocPara_m.maxAllNodePerCell ; if ( nodeRank<activeMemCount [cellRank]) { indexNext.at(i)=i+1 ; indexPrev.at(i)=i-1 ; if ( nodeRank==activeMemCount [cellRank]-1){ indexNext.at(i)=cellRank*allocPara_m.maxAllNodePerCell ; // cout << "index next for cell rank " << cellRank << " is " << indexNext.at(i) << endl ; } if (nodeRank==0){ indexPrev.at(i)=cellRank*allocPara_m.maxAllNodePerCell +activeMemCount [cellRank]-1 ; // cout << "Active membrane nodes for cell rank " << cellRank << " is " <<activeMemCount [cellRank]<<endl ; // cout << "index previous for cell rank " << cellRank << " is " << indexPrev.at(i) << endl ; } distWithNext[i]=sqrt( pow(nodes->getInfoVecs().locXOldHost[indexNext.at(i)] - 
nodes->getInfoVecs().locXOldHost[i],2) + pow(nodes->getInfoVecs().locYOldHost[indexNext.at(i)] - nodes->getInfoVecs().locYOldHost[i],2)) ; distWithPrev[i]=sqrt( pow(nodes->getInfoVecs().locXOldHost[indexPrev.at(i)] - nodes->getInfoVecs().locXOldHost[i],2) + pow(nodes->getInfoVecs().locYOldHost[indexPrev.at(i)] - nodes->getInfoVecs().locYOldHost[i],2)); } } double sponLen= globalConfigVars.getConfigValue("MembrEquLen").toDouble(); double k= globalConfigVars.getConfigValue("MembrStiff").toDouble(); for ( int i=0 ; i< totalNodeCountForActiveCells ; i++) { if (nodes->getInfoVecs().nodeIsActiveH.at(i)==false) { continue ; } cellRank=i / allocPara_m.maxAllNodePerCell; nodeRank=i % allocPara_m.maxAllNodePerCell; if (nodeRank<activeMemCount [cellRank]) { nodes->getInfoVecs().hCoefD[i]= 1 + k*dt/Damp_Coef*( 2 - sponLen/(distWithPrev[i]+0.2*sponLen) - sponLen/(distWithNext[i]+0.2*sponLen)) ; nodes->getInfoVecs().hCoefLd[i]= k*dt/Damp_Coef*(-1 + sponLen/(distWithPrev[i]+0.2*sponLen)) ; nodes->getInfoVecs().hCoefUd[i]= k*dt/Damp_Coef*(-1 + sponLen/(distWithNext[i]+0.2*sponLen)) ; } else { // no spring between neighboring points exist nodes->getInfoVecs().hCoefD[i]=1.0 ; nodes->getInfoVecs().hCoefLd[i]=0.0 ; nodes->getInfoVecs().hCoefUd[i]=0.0 ; } } } void SceCells::UpdateLocations(const vector <int> & indexPrev,const vector <int> & indexNext ) { vector <double> locXTmpHost=solverPointer->SOR3DiagPeriodic(nodes->getInfoVecs().nodeIsActiveH, nodes->getInfoVecs().hCoefLd, nodes->getInfoVecs().hCoefD, nodes->getInfoVecs().hCoefUd, nodes->getInfoVecs().rHSXHost, indexPrev,indexNext, nodes->getInfoVecs().locXOldHost); vector <double> locYTmpHost=solverPointer->SOR3DiagPeriodic(nodes->getInfoVecs().nodeIsActiveH, nodes->getInfoVecs().hCoefLd, nodes->getInfoVecs().hCoefD, nodes->getInfoVecs().hCoefUd, nodes->getInfoVecs().rHSYHost, indexPrev,indexNext, nodes->getInfoVecs().locYOldHost); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple (locXTmpHost.begin(), locYTmpHost.begin())), thrust::make_zip_iterator( thrust::make_tuple (locXTmpHost.begin(), locYTmpHost.begin()))+totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple (nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin()))); }
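// ---------------------------------------------------------------------------
// Editorial sketch (not the project's implementation): SOR3DiagPeriodic is defined in
// the solver class and is not shown in this file. Given the coefficients assembled in
// EquMotionCoef, each coordinate of every active membrane node i presumably satisfies
//   hCoefLd[i] * x[indexPrev[i]] + hCoefD[i] * x[i] + hCoefUd[i] * x[indexNext[i]] = rhs[i],
// i.e. a cyclic tridiagonal system per cell. The host routine below is one plausible
// successive-over-relaxation sweep for such a system, started from the old locations as
// the UpdateLocations call above does; the function name, relaxation factor, tolerance
// and iteration cap are illustrative assumptions, not the project's actual solver.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<double> sorCyclicTridiagSketch(const std::vector<bool>& isActive,
                                           const std::vector<double>& ld,   // lower-diagonal (hCoefLd)
                                           const std::vector<double>& d,    // diagonal      (hCoefD)
                                           const std::vector<double>& ud,   // upper-diagonal (hCoefUd)
                                           const std::vector<double>& rhs,  // old locations copied in CalRHS
                                           const std::vector<int>& indexPrev,
                                           const std::vector<int>& indexNext,
                                           std::vector<double> x,           // initial guess: old locations
                                           double omega = 1.2,
                                           double tol = 1.0e-8,
                                           int maxIter = 1000) {
  for (int iter = 0; iter < maxIter; ++iter) {
    double maxChange = 0.0;
    for (std::size_t i = 0; i < x.size(); ++i) {
      if (!isActive[i]) continue;  // inactive nodes keep their old value
      // Gauss-Seidel value for node i using the already-updated neighbors,
      // then over-relaxed by omega.
      double gaussSeidel = (rhs[i] - ld[i] * x[indexPrev[i]] - ud[i] * x[indexNext[i]]) / d[i];
      double xNew = (1.0 - omega) * x[i] + omega * gaussSeidel;
      maxChange = std::max(maxChange, std::fabs(xNew - x[i]));
      x[i] = xNew;
    }
    if (maxChange < tol) break;  // converged
  }
  return x;
}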
d69aef90672f70b9baf317902ea9f638f252f9ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "contrib_ops/cuda/quantization/qordered_ops/qordered_qdq_impl.h" #include <hipcub/hipcub.hpp> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "contrib_ops/cuda/quantization/qordered_ops/qordered_common.cuh" using namespace onnxruntime::cuda; namespace onnxruntime { namespace contrib { namespace cuda { #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11040 template <typename FloatT> struct DequantizeVec { }; template <> struct DequantizeVec<float> { typedef char QuantizedVecT; typedef float DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const float fpvals, const float inv_scale) { float dqval = fmaxf(fminf(127.0f, fpvals * inv_scale), -128.0f); return static_cast<char>(__float2int_rn(dqval)); } static __device__ inline float Dequantize(const QuantizedVecT qvals, const float scale) { return scale * qvals; } }; template <> struct DequantizeVec<float2> { typedef char2 QuantizedVecT; typedef float DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const float2 fpvals, const float inv_scale) { float dqvalx = fmaxf(fminf(127.0f, fpvals.x * inv_scale), -128.0f); float dqvaly = fmaxf(fminf(127.0f, fpvals.y * inv_scale), -128.0f); return char2{static_cast<char>(__float2int_rn(dqvalx)), static_cast<char>(__float2int_rn(dqvaly))}; } static __device__ inline float2 Dequantize(const QuantizedVecT qvals, const float scale) { return float2{scale * qvals.x, scale * qvals.y}; } }; template <> struct DequantizeVec<__half> { typedef char QuantizedVecT; typedef __half DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const __half fpvals, const __half inv_scale) { int i = __half2int_rn(fpvals * inv_scale); return static_cast<char>(min(127, max(i, -128))); } static __device__ inline __half Dequantize(const QuantizedVecT qvals, const __half scale) { return scale * __short2half_rn(static_cast<short>(qvals)); } }; template <> struct DequantizeVec<__half2> { typedef char2 QuantizedVecT; typedef __half DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const __half2 fpvals, const __half inv_scales) { __half2 xy = fpvals * __half2half2(inv_scales); U1S2 s2xy; s2xy.s2.x = __half2short_rn(xy.x); s2xy.s2.y = __half2short_rn(xy.y); s2xy.u1 = __vmaxs2(__vmins2(s2xy.u1, 0x007F007F), 0xFF80FF80); return char2{(char)s2xy.s2.x, (char)s2xy.s2.y}; } static __device__ inline __half2 Dequantize(const QuantizedVecT qvals, const __half scale) { return __half2{scale * __short2half_rn(qvals.x), scale * __short2half_rn(qvals.y)}; } }; template <> struct DequantizeVec<__half4> { typedef char4 QuantizedVecT; typedef __half DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const __half4 fpvals, const __half inv_scales) { return QuantizeHalf4Char4(fpvals, __half2half2(inv_scales)); } static __device__ inline __half4 Dequantize(const QuantizedVecT qvals, const __half scale) { return __half4{__half2{scale * __short2half_rn(qvals.x), scale * __short2half_rn(qvals.y)}, __half2{scale * __short2half_rn(qvals.z), scale * __short2half_rn(qvals.w)}}; } }; /************************************************************************ * Quantize Routines: * - OrderRow (fp16/32) to OrderCol32 (cols % 32 == 0) ************************************************************************/ // source matrix block 32 x 
32, each thread handle 4 int8 items, so: // thread block size should be (8 cols_in_4, 32 rows, 1) // grid size ((cols + 31) / 32, (rows + 31) / 32), batch) __global__ void QOrderQuantizeHalfRowToCol32Kernel(const __half* __restrict__ src, size_t src_batch_stride, int8_t* __restrict__ dst, size_t dst_batch_stride, const __half2 inverse_scale2, unsigned rows, unsigned cols) { unsigned int c = (blockIdx.x * blockDim.x + threadIdx.x) << 2; unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < cols && r < rows) { const size_t src_index = (src_batch_stride * blockIdx.z) + (r * cols + c); const size_t dst_index = (dst_batch_stride * blockIdx.z) + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1F)); __half4 const src_val4 = *((const __half4*)(src + src_index)); *(char4*)(dst + dst_index) = QuantizeHalf4Char4(src_val4, inverse_scale2); } } // cols could be divide by 32 Status QOrderQuantizeRowToCol32(hipStream_t stream, const hipDeviceProp_t& /*device_prop*/, const __half* src, int8_t* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF(cols & 0x1f, "cols can not divide by 32!"); __half2 inverse_scale2 = __float2half2_rn(1.0f / scale); dim3 threads(8, 32, 1); dim3 blocks(cols / 32, (rows + 31) / 32, batch); size_t stride = (size_t)rows * cols; hipLaunchKernelGGL(( QOrderQuantizeHalfRowToCol32Kernel), dim3(blocks), dim3(threads), 0, stream, src, stride, dst, stride, inverse_scale2, rows, cols); return CUDA_CALL(hipGetLastError()); } // source matrix block (32 x ElementsPerThread) x 32, each thread handle ElementsPerThread elements items, so: // thread block size should be (32 cols, 32 rows, 1) // grid size ((cols + 32*ElementsPerThread - 1) / (32 * ElementsPerThread), (rows + 31) / 32), batch) template <unsigned ElementsPerThread = 4> __global__ void QOrderQuantizeFloatRowToCol32Kernel(const float* __restrict__ src, size_t src_batch_stride, int8_t* __restrict__ dst, size_t dst_batch_stride, const float inverse_scale, unsigned rows, unsigned cols) { unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; static constexpr unsigned kColsPerIncrement = 32; // it is the blockDim.x if (r < rows) { unsigned int c = blockIdx.x * (kColsPerIncrement * ElementsPerThread) + threadIdx.x; size_t src_index = (src_batch_stride * blockIdx.z) + (r * cols + c); size_t dst_index = (dst_batch_stride * blockIdx.z) + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1f)); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (c < cols) { *(dst + dst_index) = QuantizeFloatS8(*(src + src_index), inverse_scale); c += kColsPerIncrement; src_index += kColsPerIncrement; dst_index += rows * kColsPerIncrement; } } } } // cols could be divide by 32 Status QOrderQuantizeRowToCol32(hipStream_t stream, const hipDeviceProp_t& /*device_prop*/, const float* src, int8_t* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF(cols & 0x1f, "cols can not divide by 32!"); constexpr unsigned kElementsPerThread = 4; float inverse_scale = 1.0f / scale; dim3 threads(32, 32, 1); dim3 blocks((cols + (32 * kElementsPerThread - 1)) / (kElementsPerThread * 32), (rows + 31) / 32, batch); size_t stride = (size_t)rows * cols; hipLaunchKernelGGL(( QOrderQuantizeFloatRowToCol32Kernel), dim3(blocks), dim3(threads), 0, stream, src, stride, dst, stride, inverse_scale, rows, cols); return CUDA_CALL(hipGetLastError()); } /************************************************************************ * Dequantize Routines: * - Col32 to OrderRow (fp16/32) (cols % 32 == 0) 
************************************************************************/ // target matrix block 32 x 32, each thread handle 4 int8 items, so: // thread block size should be (8 cols_in_4, 32 rows, 1) // grid size ((cols + 31) / 32, (rows + 31) / 32), batch) __global__ void QOrderDequantizeCol32ToHalfRowKernel(const int8_t* __restrict__ src, size_t src_batch_stride, __half* __restrict__ dst, size_t dst_batch_stride, const __half2 scale2, unsigned rows, unsigned cols) { unsigned int c = (blockIdx.x * blockDim.x + threadIdx.x) << 2; unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < cols && r < rows) { const size_t dst_index = (dst_batch_stride * blockIdx.z) + (r * cols + c); const size_t src_index = (src_batch_stride * blockIdx.z) + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1F)); const char4 src_ch4 = *((const char4*)(src + src_index)); *(__half4*)(dst + dst_index) = DeqantizeChar4Half4(src_ch4, scale2); } } // cols could be divide by 32 Status QOrderDequantizeCol32ToRow(hipStream_t stream, const hipDeviceProp_t& /*device_prop*/, const int8_t* src, __half* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF(cols & 0x1f, "cols can not divide by 32!"); __half2 scale2 = __float2half2_rn(scale); dim3 threads(8, 32, 1); dim3 blocks(cols / 32, (rows + 31) / 32, batch); size_t stride = (size_t)rows * cols; hipLaunchKernelGGL(( QOrderDequantizeCol32ToHalfRowKernel), dim3(blocks), dim3(threads), 0, stream, src, stride, dst, stride, scale2, rows, cols); return CUDA_CALL(hipGetLastError()); } // target matrix block 32 x 32, each thread handle 1 items, so: // thread block size should be (32, 32 rows, 1) // grid size ((cols / 32), (rows + 31) / 32), batch) __global__ void QOrderDequantizeCol32ToFloatRowKernel(const int8_t* __restrict__ src, size_t src_batch_stride, float* __restrict__ dst, size_t dst_batch_stride, float scale, unsigned rows, unsigned cols) { unsigned int c = blockIdx.x * blockDim.x + threadIdx.x; unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < cols && r < rows) { const size_t dst_index = (dst_batch_stride * blockIdx.z) + (r * cols + c); const size_t src_index = (src_batch_stride * blockIdx.z) + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1F)); dst[dst_index] = scale * static_cast<float>(src[src_index]); } } Status QOrderDequantizeCol32ToRow(hipStream_t stream, const hipDeviceProp_t& /*device_prop*/, const int8_t* src, float* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF(cols & 0x1f, "cols can not divide by 32!"); dim3 threads(32, 32, 1); dim3 blocks(cols / 32, (rows + 31) / 32, batch); size_t stride = (size_t)rows * cols; hipLaunchKernelGGL(( QOrderDequantizeCol32ToFloatRowKernel), dim3(blocks), dim3(threads), 0, stream, src, stride, dst, stride, scale, rows, cols); return CUDA_CALL(hipGetLastError()); } /************************************************************************ * Quantize Routines: * - fp16/32 input, do no care order ************************************************************************/ // C++17 constexpr not supported, use below trick template <typename T> struct FloatVecSelector {}; template <> struct FloatVecSelector<__half> { typedef __half4 FloatVecT; }; template <> struct FloatVecSelector<float> { typedef float2 FloatVecT; }; // block size: 256, Lets EPB = 256 * ElementCount(FloatVecT) * ElementsPerThreads // grid size: (N + BLOCK_SIZE * EPB - 1) / EPB template <typename FloatVecT, unsigned ElementsPerThread = 4> __global__ void QOrderQuantizeKernel(const typename 
DequantizeVec<FloatVecT>::DequantizedScalarT* __restrict__ src, int8_t* __restrict__ dst, size_t N, const typename DequantizeVec<FloatVecT>::DequantizedScalarT inverse_scale) { typedef typename DequantizeVec<FloatVecT>::QuantizedVecT CharVecT; size_t index = (size_t)blockIdx.x * blockDim.x * (sizeof(CharVecT) * ElementsPerThread) + threadIdx.x * sizeof(CharVecT); unsigned inc_per_iter = blockDim.x * sizeof(CharVecT); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (index < N) { FloatVecT src_vals = *(const FloatVecT*)(src + index); *(CharVecT*)(dst + index) = DequantizeVec<FloatVecT>::Quantize(src_vals, inverse_scale); index += inc_per_iter; } } } template <unsigned ElementsPerThread = 4> __global__ void QOrderQuantizeHalfStrictKernel(const __half* __restrict__ src, int8_t* __restrict__ dst, size_t N, const float inverse_scale) { unsigned inc_per_iter = blockDim.x * sizeof(char4); size_t index = (size_t)blockIdx.x * blockDim.x * (sizeof(char4) * ElementsPerThread) + threadIdx.x * sizeof(char4); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (index < N) { __half4 src_vals = *(const __half4*)(src + index); *(char4*)(dst + index) = QuantizeHalf4Char4Strict(src_vals, inverse_scale); index += inc_per_iter; } } } template <typename T> Status QOrderQuantize(hipStream_t stream, const hipDeviceProp_t& /* device_prop */, const T* src, int8_t* dst, float scale, size_t N) { ORT_RETURN_IF(N & 0x3LL, "N can not divide by 4!"); typedef typename FloatVecSelector<T>::FloatVecT FloatVecT; typedef typename DequantizeVec<FloatVecT>::QuantizedVecT QuantizedVecT; static constexpr unsigned kElementsPerThread = 4; unsigned int threads = 256; unsigned int EPB = threads * sizeof(QuantizedVecT) * kElementsPerThread; T inverse_scale = (T)(1.0f / (scale)); unsigned int blocks = (unsigned int)((N + (EPB - 1)) / EPB); hipLaunchKernelGGL(( QOrderQuantizeKernel<FloatVecT, kElementsPerThread>), dim3(blocks), dim3(threads), 0, stream, src, dst, N, inverse_scale); return CUDA_CALL(hipGetLastError()); } Status QOrderQuantize_Strict(hipStream_t stream, const hipDeviceProp_t& /* device_prop*/, const __half* src, int8_t* dst, float scale, size_t N) { ORT_RETURN_IF(N & 0x3LL, "N can not divide by 4!"); static constexpr unsigned kElementsPerThread = 4; unsigned int threads = 256; unsigned int EPB = threads * sizeof(char4) * kElementsPerThread; float inverse_scale = 1.0f / scale; unsigned int blocks = (unsigned int)((N + (EPB - 1)) / EPB); hipLaunchKernelGGL(( QOrderQuantizeHalfStrictKernel<kElementsPerThread>), dim3(blocks), dim3(threads), 0, stream, src, dst, N, inverse_scale); return CUDA_CALL(hipGetLastError()); } template Status QOrderQuantize<float>(hipStream_t stream, const hipDeviceProp_t& device_prop, const float* src, int8_t* dst, float scale, size_t N); template Status QOrderQuantize<__half>(hipStream_t stream, const hipDeviceProp_t& device_prop, const __half* src, int8_t* dst, float scale, size_t N); /************************************************************************ * Dequantize Routines: * - fp16/32 output, do no care order ************************************************************************/ // block size: 256, Lets EPB = 256 * ElementCount(FloatVecT) * ElementsPerThreads // grid size: (N + BLOCK_SIZE * EPB - 1) / EPB template <typename FloatVecT, unsigned ElementsPerThread = 4> __global__ void QOrderDequantizeKernel(const int8_t* __restrict__ src, const typename DequantizeVec<FloatVecT>::DequantizedScalarT* __restrict__ dst, size_t N, const typename 
DequantizeVec<FloatVecT>::DequantizedScalarT scale) { typedef typename DequantizeVec<FloatVecT>::QuantizedVecT CharVecT; unsigned inc_per_iter = blockDim.x * sizeof(CharVecT); size_t index = (size_t)blockIdx.x * inc_per_iter * ElementsPerThread + threadIdx.x * sizeof(CharVecT); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (index < N) { CharVecT src_vals = *(const CharVecT*)(src + index); *(FloatVecT*)(dst + index) = DequantizeVec<FloatVecT>::Dequantize(src_vals, scale); index += inc_per_iter; } } } template <typename T> Status QOrderDequantize(hipStream_t stream, const hipDeviceProp_t& /* device_prop */, const int8_t* src, T* dst, float scale, size_t N) { ORT_RETURN_IF(N & 0x3LL, "N can not divide by 4!"); typedef typename FloatVecSelector<T>::FloatVecT FloatVecT; typedef typename DequantizeVec<FloatVecT>::QuantizedVecT QuantizedVecT; static constexpr unsigned kElementsPerThread = 2; unsigned int threads = 256; unsigned int EPB = threads * sizeof(QuantizedVecT) * kElementsPerThread; T scale_as_T = (T)(scale); unsigned int blocks = (unsigned int)((N + (EPB - 1)) / EPB); hipLaunchKernelGGL(( QOrderDequantizeKernel<FloatVecT, kElementsPerThread>), dim3(blocks), dim3(threads), 0, stream, src, dst, N, scale_as_T); return CUDA_CALL(hipGetLastError()); } // block size: 256, Lets EPB = 256 * ElementCount(FloatVecT) * ElementsPerThreads // grid size: (N + BLOCK_SIZE * EPB - 1) / EPB template <unsigned ElementsPerThread = 4> __global__ void QOrderDequantizeKernel_Strict(const int8_t* __restrict__ src, const __half* __restrict__ dst, size_t N, const float scale) { unsigned inc_per_iter = blockDim.x * sizeof(char4); size_t index = (size_t)blockIdx.x * inc_per_iter * ElementsPerThread + threadIdx.x * sizeof(char4); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (index < N) { char4 src_vals = *(const char4*)(src + index); *(__half4*)(dst + index) = DeqantizeChar4Half4Strict(src_vals, scale); index += inc_per_iter; } } } Status QOrderDequantize_Strict(hipStream_t stream, const hipDeviceProp_t& device_prop, const int8_t* src, __half* dst, float scale, size_t N) { ORT_RETURN_IF(N & 0x3LL, "N can not divide by 4!"); static constexpr unsigned kElementsPerThread = 2; unsigned int threads = 256; unsigned int EPB = threads * sizeof(char4) * kElementsPerThread; unsigned int blocks = (unsigned int)((N + (EPB - 1)) / EPB); hipLaunchKernelGGL(( QOrderDequantizeKernel_Strict<kElementsPerThread>), dim3(blocks), dim3(threads), 0, stream, src, dst, N, scale); return CUDA_CALL(hipGetLastError()); } template Status QOrderDequantize<float>(hipStream_t stream, const hipDeviceProp_t& device_prop, const int8_t* src, float* dst, float scale, size_t N); template Status QOrderDequantize<__half>(hipStream_t stream, const hipDeviceProp_t& device_prop, const int8_t* src, __half* dst, float scale, size_t N); Status QOrderDequantizeToRow(cublasLtOrder_t input_order, hipStream_t stream, const hipDeviceProp_t& device_prop, const int8_t* src, __half* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF((input_order != CUBLASLT_ORDER_ROW) && (input_order != CUBLASLT_ORDER_COL32), "Order currently not supported!"); if (input_order == CUBLASLT_ORDER_ROW) { return QOrderDequantize_Strict(stream, device_prop, src, dst, scale, (size_t)batch * rows * cols); } else { // if (input_order == CUBLASLT_ORDER_COL32) { return QOrderDequantizeCol32ToRow(stream, device_prop, src, dst, scale, batch, rows, cols); } } Status QOrderQuantizeRowTo(cublasLtOrder_t output_order, hipStream_t stream, 
const hipDeviceProp_t& device_prop, const __half* src, int8_t* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF((output_order != CUBLASLT_ORDER_ROW) && (output_order != CUBLASLT_ORDER_COL32), "Order currently not supported!"); if (output_order == CUBLASLT_ORDER_ROW) { return QOrderQuantize_Strict(stream, device_prop, src, dst, scale, (size_t)batch * rows * cols); } else { // if (output_order == CUBLASLT_ORDER_COL32) { return QOrderQuantizeRowToCol32(stream, device_prop, src, dst, scale, batch, rows, cols); } } // source matrix block 32 x 32, each thread handle 4 int8_t items, // thread block size should be (8 cols_in_4, 32 rows, 1) // grid size ((cols + 31) / 32, (rows + 31) / 32), batch) __global__ void ReorderS8RowToCol32Kernel(const int8_t* __restrict__ src, int8_t* __restrict__ dst, unsigned rows, unsigned cols) { unsigned int c = (blockIdx.x * blockDim.x + threadIdx.x) << 2; unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < cols && r < rows) { const size_t batch_start = blockIdx.z * (rows * cols); const size_t src_index = batch_start + (r * cols + c); const size_t dst_index = batch_start + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1f)); *(char4*)(dst + dst_index) = *((const char4*)(src + src_index)); } } Status ReorderS8RowToCol32(hipStream_t stream, const hipDeviceProp_t& /* device_prop */, const int8_t* src, int8_t* dst, unsigned batch, unsigned rows, unsigned cols) { dim3 threads(8, 32, 1); dim3 blocks((unsigned)(cols / 32), (unsigned)((rows + 31) / 32), batch); hipLaunchKernelGGL(( ReorderS8RowToCol32Kernel), dim3(blocks), dim3(threads), 0, stream, src, dst, rows, cols); return CUDA_CALL(hipGetLastError()); } Status Reorder(cublasLtHandle_t cublasLt, hipStream_t stream, const hipDeviceProp_t& device_prop, int32_t batchCount, int64_t rows, int64_t cols, hipDataType data_type, const void* input, cublasLtOrder_t order_input, void* output, cublasLtOrder_t order_output) { if (data_type == HIP_R_8I && order_input == CUBLASLT_ORDER_ROW && order_output == CUBLASLT_ORDER_COL32) { return ReorderS8RowToCol32(stream, device_prop, (const int8_t*)input, (int8_t*)output, (unsigned)batchCount, static_cast<unsigned>(rows), static_cast<unsigned>(cols)); } cublasLtMatrixTransformDesc_t transform_desc = nullptr; auto clean_transform_desc = gsl::finally([&transform_desc]() {if (transform_desc) cublasLtMatrixTransformDescDestroy(transform_desc); }); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixTransformDescCreate(&transform_desc, HIP_R_32I)); cublasLtMatrixLayout_t InputLayout = nullptr; auto clean_InputLayout = gsl::finally([&InputLayout]() {if (InputLayout) cublasLtMatrixLayoutDestroy(InputLayout); }); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutCreate(&InputLayout, data_type, rows, cols, CalcLeadingDimensionLt(rows, cols, order_input))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(InputLayout, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_input, sizeof(order_input))); cublasLtMatrixLayout_t OutputLayout = nullptr; auto clean_OutputLayout = gsl::finally([&OutputLayout]() {if (OutputLayout) cublasLtMatrixLayoutDestroy(OutputLayout); }); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutCreate(&OutputLayout, data_type, rows, cols, CalcLeadingDimensionLt(rows, cols, order_output))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(OutputLayout, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_output, sizeof(order_output))); if (batchCount > 1) { int64_t batch_stride_input = rows * cols; CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(InputLayout, 
CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batchCount, sizeof(batchCount))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(InputLayout, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &batch_stride_input, sizeof(batch_stride_input))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(OutputLayout, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batchCount, sizeof(batchCount))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(OutputLayout, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &batch_stride_input, sizeof(batch_stride_input))); } int32_t alpha = 1; int32_t beta = 0; CUBLAS_RETURN_IF_ERROR(cublasLtMatrixTransform(cublasLt, transform_desc, &alpha, input, InputLayout, &beta, nullptr, nullptr, output, OutputLayout, stream)); return Status::OK(); }; int64_t CalcLeadingDimensionLt(int64_t rows, int64_t cols, cublasLtOrder_t order) { switch (order) { case CUBLASLT_ORDER_ROW: return cols; case CUBLASLT_ORDER_COL: return rows; case CUBLASLT_ORDER_COL32: return 32 * rows; case CUBLASLT_ORDER_COL4_4R2_8C: return 32 * ((rows + 8 - 1) / 8) * 8; case CUBLASLT_ORDER_COL32_2R_4R4: return 32 * ((rows + 32 - 1) / 32) * 32; default: return 0; } } #endif } // namespace cuda } // namespace contrib } // namespace onnxruntime
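The HIP listing above and the CUDA listing that follows share the same CUBLASLT_ORDER_COL32 index arithmetic in every RowToCol32 / Col32ToRow kernel. As a reading aid only, here is a host-side restatement of that offset; the helper name col32_offset is an assumption of this sketch and does not appear in either file.

// Reference restatement of the COL32 offset used by the kernels above (illustrative sketch only).
// A rows x cols row-major matrix is stored as vertical tiles of 32 columns; element (r, c) of one
// batch lands at:
//   (c & ~31) * rows   // start of the 32-column tile holding column c
// + (r << 5)           // r * 32, the row offset inside that tile
// + (c & 31)           // the column position inside the tile
// cols must be a multiple of 32, which is what the ORT_RETURN_IF(cols & 0x1f, ...) checks enforce.
static inline size_t col32_offset(size_t r, size_t c, size_t rows) {
  return (c & ~size_t(31)) * rows + (r << 5) + (c & size_t(31));
}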
d69aef90672f70b9baf317902ea9f638f252f9ae.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "contrib_ops/cuda/quantization/qordered_ops/qordered_qdq_impl.h" #include <cub/cub.cuh> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "contrib_ops/cuda/quantization/qordered_ops/qordered_common.cuh" using namespace onnxruntime::cuda; namespace onnxruntime { namespace contrib { namespace cuda { #if defined(CUDA_VERSION) && CUDA_VERSION >= 11040 template <typename FloatT> struct DequantizeVec { }; template <> struct DequantizeVec<float> { typedef char QuantizedVecT; typedef float DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const float fpvals, const float inv_scale) { float dqval = fmaxf(fminf(127.0f, fpvals * inv_scale), -128.0f); return static_cast<char>(__float2int_rn(dqval)); } static __device__ inline float Dequantize(const QuantizedVecT qvals, const float scale) { return scale * qvals; } }; template <> struct DequantizeVec<float2> { typedef char2 QuantizedVecT; typedef float DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const float2 fpvals, const float inv_scale) { float dqvalx = fmaxf(fminf(127.0f, fpvals.x * inv_scale), -128.0f); float dqvaly = fmaxf(fminf(127.0f, fpvals.y * inv_scale), -128.0f); return char2{static_cast<char>(__float2int_rn(dqvalx)), static_cast<char>(__float2int_rn(dqvaly))}; } static __device__ inline float2 Dequantize(const QuantizedVecT qvals, const float scale) { return float2{scale * qvals.x, scale * qvals.y}; } }; template <> struct DequantizeVec<__half> { typedef char QuantizedVecT; typedef __half DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const __half fpvals, const __half inv_scale) { int i = __half2int_rn(fpvals * inv_scale); return static_cast<char>(min(127, max(i, -128))); } static __device__ inline __half Quantize(const QuantizedVecT qvals, const __half scale) { return scale * __short2half_rn(static_cast<short>(qvals)); } }; template <> struct DequantizeVec<__half2> { typedef char2 QuantizedVecT; typedef __half DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const __half2 fpvals, const __half inv_scales) { __half2 xy = fpvals * __half2half2(inv_scales); U1S2 s2xy; s2xy.s2.x = __half2short_rn(xy.x); s2xy.s2.y = __half2short_rn(xy.y); s2xy.u1 = __vmaxs2(__vmins2(s2xy.u1, 0x007F007F), 0xFF80FF80); return char2{(char)s2xy.s2.x, (char)s2xy.s2.y}; } static __device__ inline __half2 Dequantize(const QuantizedVecT qvals, const __half scale) { return __half2{scale * __short2half_rn(qvals.x), scale * __short2half_rn(qvals.y)}; } }; template <> struct DequantizeVec<__half4> { typedef char4 QuantizedVecT; typedef __half DequantizedScalarT; static __device__ inline QuantizedVecT Quantize(const __half4 fpvals, const __half inv_scales) { return QuantizeHalf4Char4(fpvals, __half2half2(inv_scales)); } static __device__ inline __half4 Dequantize(const QuantizedVecT qvals, const __half scale) { return __half4{__half2{scale * __short2half_rn(qvals.x), scale * __short2half_rn(qvals.y)}, __half2{scale * __short2half_rn(qvals.z), scale * __short2half_rn(qvals.w)}}; } }; /************************************************************************ * Quantize Routines: * - OrderRow (fp16/32) to OrderCol32 (cols % 32 == 0) ************************************************************************/ // source matrix block 32 x 32, each thread handle 4 int8 items, so: // thread block size should be (8 cols_in_4, 32 rows, 1) // 
grid size ((cols + 31) / 32, (rows + 31) / 32), batch) __global__ void QOrderQuantizeHalfRowToCol32Kernel(const __half* __restrict__ src, size_t src_batch_stride, int8_t* __restrict__ dst, size_t dst_batch_stride, const __half2 inverse_scale2, unsigned rows, unsigned cols) { unsigned int c = (blockIdx.x * blockDim.x + threadIdx.x) << 2; unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < cols && r < rows) { const size_t src_index = (src_batch_stride * blockIdx.z) + (r * cols + c); const size_t dst_index = (dst_batch_stride * blockIdx.z) + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1F)); __half4 const src_val4 = *((const __half4*)(src + src_index)); *(char4*)(dst + dst_index) = QuantizeHalf4Char4(src_val4, inverse_scale2); } } // cols could be divide by 32 Status QOrderQuantizeRowToCol32(cudaStream_t stream, const cudaDeviceProp& /*device_prop*/, const __half* src, int8_t* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF(cols & 0x1f, "cols can not divide by 32!"); __half2 inverse_scale2 = __float2half2_rn(1.0f / scale); dim3 threads(8, 32, 1); dim3 blocks(cols / 32, (rows + 31) / 32, batch); size_t stride = (size_t)rows * cols; QOrderQuantizeHalfRowToCol32Kernel<<<blocks, threads, 0, stream>>>(src, stride, dst, stride, inverse_scale2, rows, cols); return CUDA_CALL(cudaGetLastError()); } // source matrix block (32 x ElementsPerThread) x 32, each thread handle ElementsPerThread elements items, so: // thread block size should be (32 cols, 32 rows, 1) // grid size ((cols + 32*ElementsPerThread - 1) / (32 * ElementsPerThread), (rows + 31) / 32), batch) template <unsigned ElementsPerThread = 4> __global__ void QOrderQuantizeFloatRowToCol32Kernel(const float* __restrict__ src, size_t src_batch_stride, int8_t* __restrict__ dst, size_t dst_batch_stride, const float inverse_scale, unsigned rows, unsigned cols) { unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; static constexpr unsigned kColsPerIncrement = 32; // it is the blockDim.x if (r < rows) { unsigned int c = blockIdx.x * (kColsPerIncrement * ElementsPerThread) + threadIdx.x; size_t src_index = (src_batch_stride * blockIdx.z) + (r * cols + c); size_t dst_index = (dst_batch_stride * blockIdx.z) + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1f)); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (c < cols) { *(dst + dst_index) = QuantizeFloatS8(*(src + src_index), inverse_scale); c += kColsPerIncrement; src_index += kColsPerIncrement; dst_index += rows * kColsPerIncrement; } } } } // cols could be divide by 32 Status QOrderQuantizeRowToCol32(cudaStream_t stream, const cudaDeviceProp& /*device_prop*/, const float* src, int8_t* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF(cols & 0x1f, "cols can not divide by 32!"); constexpr unsigned kElementsPerThread = 4; float inverse_scale = 1.0f / scale; dim3 threads(32, 32, 1); dim3 blocks((cols + (32 * kElementsPerThread - 1)) / (kElementsPerThread * 32), (rows + 31) / 32, batch); size_t stride = (size_t)rows * cols; QOrderQuantizeFloatRowToCol32Kernel<<<blocks, threads, 0, stream>>>(src, stride, dst, stride, inverse_scale, rows, cols); return CUDA_CALL(cudaGetLastError()); } /************************************************************************ * Dequantize Routines: * - Col32 to OrderRow (fp16/32) (cols % 32 == 0) ************************************************************************/ // target matrix block 32 x 32, each thread handle 4 int8 items, so: // thread block size should be (8 cols_in_4, 32 
rows, 1) // grid size ((cols + 31) / 32, (rows + 31) / 32), batch) __global__ void QOrderDequantizeCol32ToHalfRowKernel(const int8_t* __restrict__ src, size_t src_batch_stride, __half* __restrict__ dst, size_t dst_batch_stride, const __half2 scale2, unsigned rows, unsigned cols) { unsigned int c = (blockIdx.x * blockDim.x + threadIdx.x) << 2; unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < cols && r < rows) { const size_t dst_index = (dst_batch_stride * blockIdx.z) + (r * cols + c); const size_t src_index = (src_batch_stride * blockIdx.z) + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1F)); const char4 src_ch4 = *((const char4*)(src + src_index)); *(__half4*)(dst + dst_index) = DeqantizeChar4Half4(src_ch4, scale2); } } // cols could be divide by 32 Status QOrderDequantizeCol32ToRow(cudaStream_t stream, const cudaDeviceProp& /*device_prop*/, const int8_t* src, __half* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF(cols & 0x1f, "cols can not divide by 32!"); __half2 scale2 = __float2half2_rn(scale); dim3 threads(8, 32, 1); dim3 blocks(cols / 32, (rows + 31) / 32, batch); size_t stride = (size_t)rows * cols; QOrderDequantizeCol32ToHalfRowKernel<<<blocks, threads, 0, stream>>>(src, stride, dst, stride, scale2, rows, cols); return CUDA_CALL(cudaGetLastError()); } // target matrix block 32 x 32, each thread handle 1 items, so: // thread block size should be (32, 32 rows, 1) // grid size ((cols / 32), (rows + 31) / 32), batch) __global__ void QOrderDequantizeCol32ToFloatRowKernel(const int8_t* __restrict__ src, size_t src_batch_stride, float* __restrict__ dst, size_t dst_batch_stride, float scale, unsigned rows, unsigned cols) { unsigned int c = blockIdx.x * blockDim.x + threadIdx.x; unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < cols && r < rows) { const size_t dst_index = (dst_batch_stride * blockIdx.z) + (r * cols + c); const size_t src_index = (src_batch_stride * blockIdx.z) + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1F)); dst[dst_index] = scale * static_cast<float>(src[src_index]); } } Status QOrderDequantizeCol32ToRow(cudaStream_t stream, const cudaDeviceProp& /*device_prop*/, const int8_t* src, float* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF(cols & 0x1f, "cols can not divide by 32!"); dim3 threads(32, 32, 1); dim3 blocks(cols / 32, (rows + 31) / 32, batch); size_t stride = (size_t)rows * cols; QOrderDequantizeCol32ToFloatRowKernel<<<blocks, threads, 0, stream>>>(src, stride, dst, stride, scale, rows, cols); return CUDA_CALL(cudaGetLastError()); } /************************************************************************ * Quantize Routines: * - fp16/32 input, do no care order ************************************************************************/ // C++17 constexpr not supported, use below trick template <typename T> struct FloatVecSelector {}; template <> struct FloatVecSelector<__half> { typedef __half4 FloatVecT; }; template <> struct FloatVecSelector<float> { typedef float2 FloatVecT; }; // block size: 256, Lets EPB = 256 * ElementCount(FloatVecT) * ElementsPerThreads // grid size: (N + BLOCK_SIZE * EPB - 1) / EPB template <typename FloatVecT, unsigned ElementsPerThread = 4> __global__ void QOrderQuantizeKernel(const typename DequantizeVec<FloatVecT>::DequantizedScalarT* __restrict__ src, int8_t* __restrict__ dst, size_t N, const typename DequantizeVec<FloatVecT>::DequantizedScalarT inverse_scale) { typedef typename DequantizeVec<FloatVecT>::QuantizedVecT CharVecT; size_t index 
= (size_t)blockIdx.x * blockDim.x * (sizeof(CharVecT) * ElementsPerThread) + threadIdx.x * sizeof(CharVecT); unsigned inc_per_iter = blockDim.x * sizeof(CharVecT); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (index < N) { FloatVecT src_vals = *(const FloatVecT*)(src + index); *(CharVecT*)(dst + index) = DequantizeVec<FloatVecT>::Quantize(src_vals, inverse_scale); index += inc_per_iter; } } } template <unsigned ElementsPerThread = 4> __global__ void QOrderQuantizeHalfStrictKernel(const __half* __restrict__ src, int8_t* __restrict__ dst, size_t N, const float inverse_scale) { unsigned inc_per_iter = blockDim.x * sizeof(char4); size_t index = (size_t)blockIdx.x * blockDim.x * (sizeof(char4) * ElementsPerThread) + threadIdx.x * sizeof(char4); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (index < N) { __half4 src_vals = *(const __half4*)(src + index); *(char4*)(dst + index) = QuantizeHalf4Char4Strict(src_vals, inverse_scale); index += inc_per_iter; } } } template <typename T> Status QOrderQuantize(cudaStream_t stream, const cudaDeviceProp& /* device_prop */, const T* src, int8_t* dst, float scale, size_t N) { ORT_RETURN_IF(N & 0x3LL, "N can not divide by 4!"); typedef typename FloatVecSelector<T>::FloatVecT FloatVecT; typedef typename DequantizeVec<FloatVecT>::QuantizedVecT QuantizedVecT; static constexpr unsigned kElementsPerThread = 4; unsigned int threads = 256; unsigned int EPB = threads * sizeof(QuantizedVecT) * kElementsPerThread; T inverse_scale = (T)(1.0f / (scale)); unsigned int blocks = (unsigned int)((N + (EPB - 1)) / EPB); QOrderQuantizeKernel<FloatVecT, kElementsPerThread><<<blocks, threads, 0, stream>>>(src, dst, N, inverse_scale); return CUDA_CALL(cudaGetLastError()); } Status QOrderQuantize_Strict(cudaStream_t stream, const cudaDeviceProp& /* device_prop*/, const __half* src, int8_t* dst, float scale, size_t N) { ORT_RETURN_IF(N & 0x3LL, "N can not divide by 4!"); static constexpr unsigned kElementsPerThread = 4; unsigned int threads = 256; unsigned int EPB = threads * sizeof(char4) * kElementsPerThread; float inverse_scale = 1.0f / scale; unsigned int blocks = (unsigned int)((N + (EPB - 1)) / EPB); QOrderQuantizeHalfStrictKernel<kElementsPerThread><<<blocks, threads, 0, stream>>>(src, dst, N, inverse_scale); return CUDA_CALL(cudaGetLastError()); } template Status QOrderQuantize<float>(cudaStream_t stream, const cudaDeviceProp& device_prop, const float* src, int8_t* dst, float scale, size_t N); template Status QOrderQuantize<__half>(cudaStream_t stream, const cudaDeviceProp& device_prop, const __half* src, int8_t* dst, float scale, size_t N); /************************************************************************ * Dequantize Routines: * - fp16/32 output, do no care order ************************************************************************/ // block size: 256, Lets EPB = 256 * ElementCount(FloatVecT) * ElementsPerThreads // grid size: (N + BLOCK_SIZE * EPB - 1) / EPB template <typename FloatVecT, unsigned ElementsPerThread = 4> __global__ void QOrderDequantizeKernel(const int8_t* __restrict__ src, const typename DequantizeVec<FloatVecT>::DequantizedScalarT* __restrict__ dst, size_t N, const typename DequantizeVec<FloatVecT>::DequantizedScalarT scale) { typedef typename DequantizeVec<FloatVecT>::QuantizedVecT CharVecT; unsigned inc_per_iter = blockDim.x * sizeof(CharVecT); size_t index = (size_t)blockIdx.x * inc_per_iter * ElementsPerThread + threadIdx.x * sizeof(CharVecT); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { 
if (index < N) { CharVecT src_vals = *(const CharVecT*)(src + index); *(FloatVecT*)(dst + index) = DequantizeVec<FloatVecT>::Dequantize(src_vals, scale); index += inc_per_iter; } } } template <typename T> Status QOrderDequantize(cudaStream_t stream, const cudaDeviceProp& /* device_prop */, const int8_t* src, T* dst, float scale, size_t N) { ORT_RETURN_IF(N & 0x3LL, "N can not divide by 4!"); typedef typename FloatVecSelector<T>::FloatVecT FloatVecT; typedef typename DequantizeVec<FloatVecT>::QuantizedVecT QuantizedVecT; static constexpr unsigned kElementsPerThread = 2; unsigned int threads = 256; unsigned int EPB = threads * sizeof(QuantizedVecT) * kElementsPerThread; T scale_as_T = (T)(scale); unsigned int blocks = (unsigned int)((N + (EPB - 1)) / EPB); QOrderDequantizeKernel<FloatVecT, kElementsPerThread><<<blocks, threads, 0, stream>>>(src, dst, N, scale_as_T); return CUDA_CALL(cudaGetLastError()); } // block size: 256, Lets EPB = 256 * ElementCount(FloatVecT) * ElementsPerThreads // grid size: (N + BLOCK_SIZE * EPB - 1) / EPB template <unsigned ElementsPerThread = 4> __global__ void QOrderDequantizeKernel_Strict(const int8_t* __restrict__ src, const __half* __restrict__ dst, size_t N, const float scale) { unsigned inc_per_iter = blockDim.x * sizeof(char4); size_t index = (size_t)blockIdx.x * inc_per_iter * ElementsPerThread + threadIdx.x * sizeof(char4); #pragma unroll for (int i = 0; i < ElementsPerThread; i++) { if (index < N) { char4 src_vals = *(const char4*)(src + index); *(__half4*)(dst + index) = DeqantizeChar4Half4Strict(src_vals, scale); index += inc_per_iter; } } } Status QOrderDequantize_Strict(cudaStream_t stream, const cudaDeviceProp& device_prop, const int8_t* src, __half* dst, float scale, size_t N) { ORT_RETURN_IF(N & 0x3LL, "N can not divide by 4!"); static constexpr unsigned kElementsPerThread = 2; unsigned int threads = 256; unsigned int EPB = threads * sizeof(char4) * kElementsPerThread; unsigned int blocks = (unsigned int)((N + (EPB - 1)) / EPB); QOrderDequantizeKernel_Strict<kElementsPerThread><<<blocks, threads, 0, stream>>>(src, dst, N, scale); return CUDA_CALL(cudaGetLastError()); } template Status QOrderDequantize<float>(cudaStream_t stream, const cudaDeviceProp& device_prop, const int8_t* src, float* dst, float scale, size_t N); template Status QOrderDequantize<__half>(cudaStream_t stream, const cudaDeviceProp& device_prop, const int8_t* src, __half* dst, float scale, size_t N); Status QOrderDequantizeToRow(cublasLtOrder_t input_order, cudaStream_t stream, const cudaDeviceProp& device_prop, const int8_t* src, __half* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF((input_order != CUBLASLT_ORDER_ROW) && (input_order != CUBLASLT_ORDER_COL32), "Order currently not supported!"); if (input_order == CUBLASLT_ORDER_ROW) { return QOrderDequantize_Strict(stream, device_prop, src, dst, scale, (size_t)batch * rows * cols); } else { // if (input_order == CUBLASLT_ORDER_COL32) { return QOrderDequantizeCol32ToRow(stream, device_prop, src, dst, scale, batch, rows, cols); } } Status QOrderQuantizeRowTo(cublasLtOrder_t output_order, cudaStream_t stream, const cudaDeviceProp& device_prop, const __half* src, int8_t* dst, float scale, unsigned batch, unsigned rows, unsigned cols) { ORT_RETURN_IF((output_order != CUBLASLT_ORDER_ROW) && (output_order != CUBLASLT_ORDER_COL32), "Order currently not supported!"); if (output_order == CUBLASLT_ORDER_ROW) { return QOrderQuantize_Strict(stream, device_prop, src, dst, scale, (size_t)batch * rows * cols); } 
else { // if (output_order == CUBLASLT_ORDER_COL32) { return QOrderQuantizeRowToCol32(stream, device_prop, src, dst, scale, batch, rows, cols); } } // source matrix block 32 x 32, each thread handle 4 int8_t items, // thread block size should be (8 cols_in_4, 32 rows, 1) // grid size ((cols + 31) / 32, (rows + 31) / 32), batch) __global__ void ReorderS8RowToCol32Kernel(const int8_t* __restrict__ src, int8_t* __restrict__ dst, unsigned rows, unsigned cols) { unsigned int c = (blockIdx.x * blockDim.x + threadIdx.x) << 2; unsigned int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < cols && r < rows) { const size_t batch_start = blockIdx.z * (rows * cols); const size_t src_index = batch_start + (r * cols + c); const size_t dst_index = batch_start + ((c & 0xffffffe0) * rows + (r << 5) + (c & 0x1f)); *(char4*)(dst + dst_index) = *((const char4*)(src + src_index)); } } Status ReorderS8RowToCol32(cudaStream_t stream, const cudaDeviceProp& /* device_prop */, const int8_t* src, int8_t* dst, unsigned batch, unsigned rows, unsigned cols) { dim3 threads(8, 32, 1); dim3 blocks((unsigned)(cols / 32), (unsigned)((rows + 31) / 32), batch); ReorderS8RowToCol32Kernel<<<blocks, threads, 0, stream>>>(src, dst, rows, cols); return CUDA_CALL(cudaGetLastError()); } Status Reorder(cublasLtHandle_t cublasLt, cudaStream_t stream, const cudaDeviceProp& device_prop, int32_t batchCount, int64_t rows, int64_t cols, cudaDataType_t data_type, const void* input, cublasLtOrder_t order_input, void* output, cublasLtOrder_t order_output) { if (data_type == CUDA_R_8I && order_input == CUBLASLT_ORDER_ROW && order_output == CUBLASLT_ORDER_COL32) { return ReorderS8RowToCol32(stream, device_prop, (const int8_t*)input, (int8_t*)output, (unsigned)batchCount, static_cast<unsigned>(rows), static_cast<unsigned>(cols)); } cublasLtMatrixTransformDesc_t transform_desc = nullptr; auto clean_transform_desc = gsl::finally([&transform_desc]() {if (transform_desc) cublasLtMatrixTransformDescDestroy(transform_desc); }); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixTransformDescCreate(&transform_desc, CUDA_R_32I)); cublasLtMatrixLayout_t InputLayout = nullptr; auto clean_InputLayout = gsl::finally([&InputLayout]() {if (InputLayout) cublasLtMatrixLayoutDestroy(InputLayout); }); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutCreate(&InputLayout, data_type, rows, cols, CalcLeadingDimensionLt(rows, cols, order_input))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(InputLayout, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_input, sizeof(order_input))); cublasLtMatrixLayout_t OutputLayout = nullptr; auto clean_OutputLayout = gsl::finally([&OutputLayout]() {if (OutputLayout) cublasLtMatrixLayoutDestroy(OutputLayout); }); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutCreate(&OutputLayout, data_type, rows, cols, CalcLeadingDimensionLt(rows, cols, order_output))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(OutputLayout, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_output, sizeof(order_output))); if (batchCount > 1) { int64_t batch_stride_input = rows * cols; CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(InputLayout, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batchCount, sizeof(batchCount))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(InputLayout, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &batch_stride_input, sizeof(batch_stride_input))); CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(OutputLayout, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batchCount, sizeof(batchCount))); 
CUBLAS_RETURN_IF_ERROR(cublasLtMatrixLayoutSetAttribute(OutputLayout, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &batch_stride_input, sizeof(batch_stride_input))); } int32_t alpha = 1; int32_t beta = 0; CUBLAS_RETURN_IF_ERROR(cublasLtMatrixTransform(cublasLt, transform_desc, &alpha, input, InputLayout, &beta, nullptr, nullptr, output, OutputLayout, stream)); return Status::OK(); }; int64_t CalcLeadingDimensionLt(int64_t rows, int64_t cols, cublasLtOrder_t order) { switch (order) { case CUBLASLT_ORDER_ROW: return cols; case CUBLASLT_ORDER_COL: return rows; case CUBLASLT_ORDER_COL32: return 32 * rows; case CUBLASLT_ORDER_COL4_4R2_8C: return 32 * ((rows + 8 - 1) / 8) * 8; case CUBLASLT_ORDER_COL32_2R_4R4: return 32 * ((rows + 32 - 1) / 32) * 32; default: return 0; } } #endif } // namespace cuda } // namespace contrib } // namespace onnxruntime
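The order-agnostic QOrderQuantize / QOrderDequantize paths in both listings size their grids the same way: a block of 256 threads covers 256 * sizeof(char4) * kElementsPerThread int8 elements. A minimal sketch of that arithmetic, detached from the Status/stream plumbing (the function name below is made up for illustration):

// Sketch of the "EPB" (elements per block) launch math used by the flat kernels above.
// N must be a multiple of 4 because every thread loads/stores one char4 per iteration.
static inline unsigned flat_kernel_grid(size_t N, unsigned threads = 256, unsigned elems_per_thread = 4) {
  const size_t elems_per_block = size_t(threads) * 4 /* sizeof(char4) */ * elems_per_thread; // EPB
  return static_cast<unsigned>((N + elems_per_block - 1) / elems_per_block);
}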
737e36a49db54ab2b6501529320f56a975cbbfa4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice1D.h" #include "cudaTools.h" #include <stdio.h> #include <hiprand/hiprand_kernel.h> #include <limits.h> #include "ReductionAddTools.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void createGenerator(hiprandState_t* tabDevGeneratorGM, int deviceId); __global__ void monteCarlo(hiprandState_t* tabDevGeneratorGM, int nbFleche, int* ptrDevResultat); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void intraThread(hiprandState_t* tabDevGeneratorGM, int nbFleche, int* tabSM); __device__ float f(float x); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void createGenerator(hiprandState_t* tabDevGeneratorGM, int deviceId) { const int TID = Indice1D::tid(); int deltaSeed = deviceId * INT_MAX / 10000; int deltaSequence = deviceId * 100; int deltaOffset = deviceId * 100; int seed = 1234 + deltaSeed; int sequenceNumber = TID + deltaSequence; int offset = deltaOffset; hiprand_init(seed, sequenceNumber, offset, &tabDevGeneratorGM[TID]); } __global__ void monteCarlo(hiprandState_t* tabDevGeneratorGM, int nbFleche, int* ptrDevResultat) { extern __shared__ int tabSM[]; intraThread(tabDevGeneratorGM, nbFleche, tabSM); __syncthreads(); ReductionAddTools::reductionADD(tabSM, ptrDevResultat); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void intraThread(hiprandState_t* tabDevGeneratorGM, int nbFleche, int* tabSM) { const int TID = Indice1D::tid(); hiprandState_t localGenerator = tabDevGeneratorGM[TID]; float xAlea; float yAlea; int nbFlecheIn = 0; for (int i = 0; i < nbFleche; ++i) { xAlea = hiprand_uniform(&localGenerator); yAlea = 4 * hiprand_uniform(&localGenerator); // { //Variante 1 // if (yAlea < f(xAlea)) // { // nbFlecheIn++; // } // } { //Variante 2 nbFlecheIn += yAlea < f(xAlea); } } tabSM[Indice1D::tidLocal()] = nbFlecheIn; tabDevGeneratorGM[TID] = localGenerator; } __device__ float f(float x) { return 4 / (1 + x * x); } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
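Both the HIP file above and the CUDA file below implement the same estimator: the integral of f(x) = 4/(1 + x*x) over [0,1] equals pi, so each thread throws darts uniformly into the box [0,1] x [0,4] and counts hits under the curve. A single-threaded CPU sketch of that estimator follows; the function name and the use of <random> are assumptions for illustration, not part of either file.

// Hit-or-miss estimate of pi, mirroring intraThread() above on the CPU (sketch only).
#include <random>
static double estimate_pi(long long nbFleche, unsigned seed = 1234) {
  std::mt19937 gen(seed);
  std::uniform_real_distribution<float> u(0.0f, 1.0f);
  long long nbFlecheIn = 0;
  for (long long i = 0; i < nbFleche; ++i) {
    float x = u(gen);
    float y = 4.0f * u(gen);                   // y uniform in [0, 4)
    nbFlecheIn += (y < 4.0f / (1.0f + x * x)); // hit if the dart falls below f(x)
  }
  return 4.0 * double(nbFlecheIn) / double(nbFleche); // box area (4) * hit fraction
}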
737e36a49db54ab2b6501529320f56a975cbbfa4.cu
#include "Indice1D.h" #include "cudaTools.h" #include <stdio.h> #include <curand_kernel.h> #include <limits.h> #include "ReductionAddTools.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void createGenerator(curandState* tabDevGeneratorGM, int deviceId); __global__ void monteCarlo(curandState* tabDevGeneratorGM, int nbFleche, int* ptrDevResultat); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void intraThread(curandState* tabDevGeneratorGM, int nbFleche, int* tabSM); __device__ float f(float x); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void createGenerator(curandState* tabDevGeneratorGM, int deviceId) { const int TID = Indice1D::tid(); int deltaSeed = deviceId * INT_MAX / 10000; int deltaSequence = deviceId * 100; int deltaOffset = deviceId * 100; int seed = 1234 + deltaSeed; int sequenceNumber = TID + deltaSequence; int offset = deltaOffset; curand_init(seed, sequenceNumber, offset, &tabDevGeneratorGM[TID]); } __global__ void monteCarlo(curandState* tabDevGeneratorGM, int nbFleche, int* ptrDevResultat) { extern __shared__ int tabSM[]; intraThread(tabDevGeneratorGM, nbFleche, tabSM); __syncthreads(); ReductionAddTools::reductionADD(tabSM, ptrDevResultat); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void intraThread(curandState* tabDevGeneratorGM, int nbFleche, int* tabSM) { const int TID = Indice1D::tid(); curandState localGenerator = tabDevGeneratorGM[TID]; float xAlea; float yAlea; int nbFlecheIn = 0; for (int i = 0; i < nbFleche; ++i) { xAlea = curand_uniform(&localGenerator); yAlea = 4 * curand_uniform(&localGenerator); // { //Variante 1 // if (yAlea < f(xAlea)) // { // nbFlecheIn++; // } // } { //Variante 2 nbFlecheIn += yAlea < f(xAlea); } } tabSM[Indice1D::tidLocal()] = nbFlecheIn; tabDevGeneratorGM[TID] = localGenerator; } __device__ float f(float x) { return 4 / (1 + x * x); } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
0f7bdfbc780dbdd8c72410430edbf6517cdbf5be.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/timeb.h> #include <string.h> #include <string> #include <deque> #include <vector> #include <algorithm> #include "knapsack.cuh" #include <iostream> #include <thrust/device_vector.h> #include "kernels_hip.cuh" #include <thrust/system/hip/experimental/pinned_allocator.h> #include <cassert> #define TIMEB struct timeb #define FTIME ftime #define UDT_TIME long #define MAX_SIZE_TO_PRINT 10 UDT_TIME gRefTime = 0; UDT_TIME GetMilliSecondTime(TIMEB timeBuf); void SetTime(void); UDT_TIME GetTime(void); int main(int argc, char* argv[]) { UDT_TIME time; int itemCnt; KnapsackInstance* inst; //a Knapsack instance object KnapsackDPSolver DPSolver; KnapsackBFSolver BFSolver; //brute-force solver KnapsackSolution *BFSoln; if (argc != 2) { printf("Invalid Number of command-line arguments\n"); exit(1); } itemCnt = atoi(argv[1]); if (itemCnt < 1) { printf("Invalid number of items\n"); exit(1); } // Creating Knapsack and solution object inst = new KnapsackInstance(itemCnt); BFSoln = new KnapsackSolution(inst); // Item Generation inside knapsack inst->Generate(); // Print knapsack items inst->Print(); SetTime(); DPSolver.Solve(inst, BFSoln); time = GetTime(); printf("\n\nSolved using dynamic programming (DP) in %ld ms. Optimal value = %d", time, BFSoln->getDPValue()); if (itemCnt <= MAX_SIZE_TO_PRINT) BFSoln->Print("Dynamic Programming Solution"); SetTime(); BFSolver.Solve(inst, BFSoln); printf("\n\nSolved using brute-force enumeration (BF) in %ld ms. Optimal value = %d", time, BFSoln->getlbValue()); if (itemCnt <= MAX_SIZE_TO_PRINT) BFSoln->Print("Brute-Force Solution"); delete inst; delete BFSoln; printf("\n\nProgram Completed Successfully\n"); return 0; } /********************************************************************/ UDT_TIME GetCurrentTime(void) { UDT_TIME crntTime = 0; TIMEB timeBuf; FTIME(&timeBuf); crntTime = GetMilliSecondTime(timeBuf); return crntTime; } /********************************************************************/ void SetTime(void) { gRefTime = GetCurrentTime(); } /********************************************************************/ UDT_TIME GetTime(void) { UDT_TIME crntTime = GetCurrentTime(); return (crntTime - gRefTime); } /********************************************************************/ UDT_TIME GetMilliSecondTime(TIMEB timeBuf) { UDT_TIME mliScndTime; mliScndTime = timeBuf.time; mliScndTime *= 1000; mliScndTime += timeBuf.millitm; return mliScndTime; } class KnapsackItem { public: int weight; int value; float valueperweight; KnapsackItem(int wght, int val) { weight = wght; value = val; valueperweight = (float)val / (float)wght; } }; void KnapsackDPSolver::Solve(KnapsackInstance* inst_, KnapsackSolution* soln_) { inst = inst_; soln = soln_; int n = inst->GetItemCnt(); int c = inst->GetCapacity(); int** a; a = new int*[n + 1]; for (int i = 0; i < n + 1; i++) { a[i] = new int[c + 1]; } for (int j = 0; j <= c; j++) a[0][j] = 0; for (int i = 1; i <= n; i++) { for (int j = 0; j <= c; j++) { if (inst->GetItemWeight(i) > j) a[i][j] = a[i - 1][j]; else a[i][j] = ::max(inst->GetItemValue(i) + a[i - 1][j - inst->GetItemWeight(i)], a[i - 1][j]); } } soln->setDPValue(a[n][c]); int j = c; for (int i = n; i >= 1; i--) { if (a[i][j] > a[i - 1][j]) { printf("%d ", i); j -= inst->GetItemWeight(i); } } delete a; } void KnapsackSolution::setDPValue(int val) { dpvalue = val; } int KnapsackSolution::getDPValue() { return dpvalue; } 
KnapsackInstance::KnapsackInstance(int itemCnt_) { itemCnt = itemCnt_; weights = new int[itemCnt + 1]; values = new int[itemCnt + 1]; cap = 0; } /********************************************************************/ KnapsackInstance::~KnapsackInstance() { delete[] weights; delete[] values; } /********************************************************************/ void KnapsackInstance::Generate() { int i, wghtSum = 0; std::vector<KnapsackItem> items; weights[0] = 0; values[0] = 0; for (i = 1; i <= itemCnt; i++) { weights[i] = rand() % 100 + 1; values[i] = weights[i] + 10; items.push_back(KnapsackItem(weights[i], values[i])); wghtSum += weights[i]; } std::sort(items.begin(), items.end(), [](KnapsackItem const & a, KnapsackItem const & b) -> bool { return a.valueperweight > b.valueperweight; }); printf("Number of items = %d, Capacity = %d\n", itemCnt, cap); printf("Weights: "); for (i = 1; i <= itemCnt; i++) { printf("%d ", weights[i]); } printf("\nValues: "); for (i = 1; i <= itemCnt; i++) { printf("%d ", values[i]); } printf("\n"); i = 1; for (auto itm = items.begin(); itm != items.end(); itm++, i++) { weights[i] = itm->weight; values[i] = itm->value; printf("%7.2f ", itm->valueperweight); } cap = wghtSum / 2; } void KnapsackInstance::GenerateManually() { int i, wghtSum; /*weights[0] = 0; values[0] = 0; weights[1] = 5; values[1] = 55; weights[2] = 2; values[2] = 20; weights[3] = 7; values[3] = 63; weights[4] = 8; values[4] = 64; weights[5] = 13; values[5] = 91; weights[6] = 14; values[6] = 84; weights[7] = 25; values[7] = 125; weights[8] = 20; values[8] = 80; weights[9] = 2; values[9] = 6; weights[10] = 19; values[10] = 38;*/ wghtSum = 0; for (i = 1; i <= itemCnt; i++) { /*weights[i] = rand() % 100 + 1; values[i] = weights[i] + 10;*/ wghtSum += weights[i]; } cap = wghtSum / 2; //cap = 10; } /********************************************************************/ int KnapsackInstance::GetItemCnt() { return itemCnt; } /********************************************************************/ int KnapsackInstance::GetItemWeight(int itemNum) { return weights[itemNum]; } /********************************************************************/ int KnapsackInstance::GetItemValue(int itemNum) { return values[itemNum]; } /********************************************************************/ int KnapsackInstance::GetCapacity() { return cap; } /********************************************************************/ const int* KnapsackInstance::weight_ptr() { return weights; } const int* KnapsackInstance::value_ptr() { return values; } void KnapsackInstance::Print() { int i; printf("\nAfter Sorting\n"); printf("Number of items = %d, Capacity = %d\n", itemCnt, cap); printf("Weights: "); for (i = 1; i <= itemCnt; i++) { printf("%d ", weights[i]); } printf("\nValues: "); for (i = 1; i <= itemCnt; i++) { printf("%d ", values[i]); } printf("\n"); } /*****************************************************************************/ KnapsackSolution::KnapsackSolution(KnapsackInstance* inst_) { int i, itemCnt = inst_->GetItemCnt(); beforeSlackWeight = 0; beforeSlackValue = 0; lbValue = 0; slackItem = 1; inst = inst_; isTaken = new bool[itemCnt + 1]; for (i = 1; i <= itemCnt; i++) { isTaken[i] = false; } } /********************************************************************/ KnapsackSolution::~KnapsackSolution() { //delete [] isTaken; } /********************************************************************/ //bool KnapsackSolution::operator == (KnapsackSolution& otherSoln) //{ // return value == otherSoln.value; //} 
/********************************************************************/ void KnapsackSolution::Take(int itemNum) { if (slackItem > itemNum) { isTaken[itemNum] = true; } } void KnapsackSolution::DontTake(int itemNum) { isTaken[itemNum] = false; if (slackItem > itemNum) { beforeSlackWeight -= inst->GetItemWeight(itemNum); beforeSlackValue -= inst->GetItemValue(itemNum); } else { slackItem++; } } int KnapsackSolution::getbeforeSlackWeight() { return beforeSlackWeight; } int KnapsackSolution::getbeforeSlackValue() { return beforeSlackValue; } int KnapsackSolution::getslackItem() { return slackItem; } int KnapsackSolution::getlbValue() { return lbValue; } void KnapsackSolution::Copy(KnapsackSolution* otherSoln) { int i, itemCnt = inst->GetItemCnt(); for (i = 1; i <= itemCnt; i++) { isTaken[i] = otherSoln->isTaken[i]; } //value = otherSoln->value; beforeSlackValue = otherSoln->getbeforeSlackValue(); beforeSlackWeight = otherSoln->getbeforeSlackWeight(); slackItem = otherSoln->getslackItem(); lbValue = otherSoln->getlbValue(); } /********************************************************************/ void KnapsackSolution::Print(std::string title) { int i, itemCnt = inst->GetItemCnt(); printf("\n%s: ", title.c_str()); for (i = 1; i <= itemCnt; i++) { if (isTaken[i] == true) printf("%d ", i); } //printf("\nValue = %d\n",value); } /*****************************************************************************/ KnapsackBFSolver::KnapsackBFSolver() { crntSoln = NULL; } /********************************************************************/ KnapsackBFSolver::~KnapsackBFSolver() { if (crntSoln != NULL) delete crntSoln; } /********************************************************************/ void KnapsackBFSolver::Solve(KnapsackInstance* inst_, KnapsackSolution* soln_) { //saumya inst = inst_; bestSoln = soln_; int level = 0; std::deque <KnapsackSolution> solq; crntSoln = new KnapsackSolution(inst); crntSoln->update_bounds(); solq.push_back(*crntSoln); // Transferring memory to GPU transfer_knapsack(inst); initialize_memory_pool(); while (++level <= inst->GetItemCnt() && (!solq.empty() || md_vectorSizes > 0)) { if (solq.size() > GPU_SWITCH_THRESH && md_vectorSizes == 0) { #ifdef DEBUG_MODE_GPU std::cout << "Branching on GPU, Level: " << k << std::endl; #endif transfer_to_gpu(solq); *mh_bestLbValue = bestSoln->getlbValue(); } if (md_vectorSizes > 0) { #ifdef DEBUG_MODE_GPU std::cout << "-----------------------" << "Now on level: " << k << "--------------------" << std::endl; #endif branch_gpu(level); //bound_gpu(k); //historyBasedApproach(k); //int bestIndex = find_best_lb_index_gpu(k); //label_nodes_gpu(); //concatenate_lists_cpu_gpu(bestIndex); } else { branch(level, solq); bound(level, solq, *bestSoln); prune(level, solq, *bestSoln); } // Transfer from the GPU only if we have nodes on the GPU and qsize is 0 and we're under the threshold if (md_vectorSizes > 0 && md_vectorSizes < GPU_SWITCH_THRESH && solq.size() == 0) { #ifdef DEBUG_MODE_GPU std::cout << "Transfering from GPU, Level: " << k << std::endl; #endif //transfer_from_gpu(q, best, k); } } // This is if the initial solution is the best solution... 
if (level < bestSoln->getslackItem()) { // Means the solution was not finished for (int i = level; i < bestSoln->getslackItem(); ++i) { bestSoln->Take(i); } // Take any other items that might fit after slack items int w = inst->GetCapacity() - bestSoln->getbeforeSlackWeight(); for (int i = bestSoln->getslackItem(); i <inst->GetItemCnt(); ++i) { if (w >= inst->GetItemWeight(i)) { bestSoln->Take(i); w -= inst->GetItemWeight(i); } } } //auto soln = best->solution(); //assert(soln->value() == best->lb_value()); //return soln; /*while (++level <= inst->GetItemCnt()) { branch(level, solq); bound(level, solq, *bestSoln); prune(level, solq, *bestSoln); } int i = 0; while (i < solq.size()) { auto sol = std::move(solq.at(i)); i++; delete sol.isTaken; }*/ } /********************************************************************/ hipError_t KnapsackBFSolver::transfer_to_gpu(std::deque<KnapsackSolution>& q) { md_vectorSizes = q.size() * 2; // Create solutions vectors as one flat array with a stride of knapsack size between levels md_solutionVectorSize = q.size() * (inst->GetItemCnt() + 1) * 2; if (md_vectorSizes > md_nodeLimit) { std::cerr << "Not enough device memory... Aborting..."; throw std::runtime_error("Out of memory, GPU"); } // Transfer values into host vectors size_t qSize = q.size(); // Push back solution vectors now for (int i = 0; i < qSize; ++i) { auto node = std::move(q.front()); q.pop_front(); mh_beforeSlackValues[i] = node.getbeforeSlackValue(); mh_beforeSlackWeights[i] = node.getbeforeSlackWeight(); mh_lbValues[i] = node.getlbValue(); mh_ubValues[i] = node.getubValue(); mh_slackItems[i] = node.getslackItem(); for (int j = 1; j <= inst->GetItemCnt(); ++j) { mh_solutionVectors[i * (inst->GetItemCnt() + 1) + j] = node.isTaken[j]; } } auto err = hipMemcpy(&md_ubValues[0], &mh_ubValues[0], sizeof(int) * md_vectorSizes, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cerr << " Failed to Copy UB Values from Host to Device" << std::endl; goto Error; } err = hipMemcpy(&md_slackItems[0], &mh_slackItems[0], sizeof(int) * md_vectorSizes, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cerr << " Failed to Copy Slack Items from Host to Device" << std::endl; goto Error; } err = hipMemcpy(&md_beforeSlackValues[0], &mh_beforeSlackValues[0], sizeof(int) * md_vectorSizes, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cerr << " Failed to Copy Before Slack Values from Host to Device" << std::endl; goto Error; } err = hipMemcpy(&md_beforeSlackWeights[0], &mh_beforeSlackWeights[0], sizeof(int) * md_vectorSizes, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cerr << " Failed to Copy Before Slack Weights from Host to Device" << std::endl; goto Error; } err = hipMemcpy(&md_solutionVectors[0], &mh_solutionVectors[0], sizeof(bool) * md_solutionVectorSize, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cerr << " Failed to Copy Solution Vector from Host to Device" << std::endl; goto Error; } err = hipMemcpy(&md_lbValues[0], &mh_lbValues[0], sizeof(int) * md_vectorSizes, hipMemcpyHostToDevice); if (err != hipSuccess) { std::cerr << " Failed to Copy LB Values from Host to Device" << std::endl; goto Error; } Error: if (err != hipSuccess) { // Now see what memory got allocated and dealloc that free_gpu_memory(); throw std::runtime_error("Failed to copy host memory"); } return err; } void KnapsackBFSolver::branch_gpu(int k) { // Create grid for branch kernel, depends on size being halved so that it can split between the two different sections of memory int gridX = ((md_vectorSizes / 2 - 
1) / BRANCH_BLOCK_SIZE) + 1; dim3 gridSize(gridX, 1, 1); dim3 blockSize(BRANCH_BLOCK_SIZE, 1, 1); // Launch branch kernel branch_kernel <<<gridSize, blockSize >> > (md_beforeSlackWeights, md_beforeSlackValues, md_slackItems, md_ubValues, md_solutionVectors, k, md_vectorSizes / 2); } void KnapsackBFSolver::free_gpu_memory() { if (md_rawData != nullptr) { hipFree(md_rawData); } if (mh_rawData != nullptr) { hipHostFree(mh_rawData); } // Set all pointers to nullptr md_rawData = nullptr; mh_rawData = nullptr; md_beforeSlackValues = nullptr; mh_beforeSlackValues = nullptr; md_beforeSlackWeights = nullptr; mh_beforeSlackWeights = nullptr; md_slackItems = nullptr; mh_slackItems = nullptr; md_lbValues = nullptr; mh_lbValues = nullptr; md_ubValues = nullptr; mh_ubValues = nullptr; md_labelTable = nullptr; mh_labelTable = nullptr; md_solutionVectors = nullptr; mh_solutionVectors = nullptr; md_vectorSizes = 0; md_solutionVectorSize = 0; } void KnapsackSolution::update_bounds() { // If the node has been labeled as non-promising, dont update bounds if (beforeSlackValue < 0) { return; } while (slackItem <= inst->GetItemCnt()) { int itemWght = inst->GetItemWeight(slackItem); int itemVal = inst->GetItemValue(slackItem); if (beforeSlackWeight + itemWght <= inst->GetCapacity()) { // We can fit this item in completely beforeSlackWeight += itemWght; beforeSlackValue += itemVal; // See if we can try to fit any more items from the knapsack slackItem++; } else { break; } } int lbItemIndex = slackItem + 1; lbValue = beforeSlackValue; int lbWeight = beforeSlackWeight; while (lbWeight < inst->GetCapacity() && lbItemIndex <= inst->GetItemCnt()) { // See if we can fit the item into the lower bound solution if (inst->GetItemWeight(lbItemIndex) <= (inst->GetCapacity() - lbWeight)) { lbWeight += inst->GetItemWeight(lbItemIndex); lbValue += inst->GetItemValue(lbItemIndex); } lbItemIndex++; } } int KnapsackSolution::getubValue() const { // This means that we have taken upto and not including slack item, so we // Dont need to search down this subtree anymore, prune this node if (beforeSlackValue < 0) { return INVALID_VALUE; } if (slackItem <= inst->GetItemCnt()) { int residualCapacity = inst->GetCapacity() - beforeSlackWeight; float p = static_cast<float>(residualCapacity)*static_cast<float>(inst->GetItemValue(slackItem)) / static_cast<float>(inst->GetItemWeight(slackItem)); return beforeSlackValue + p; } return static_cast<int>(beforeSlackValue); } void KnapsackBFSolver::branch(int k, std::deque<KnapsackSolution>& q) { //saving queue size since queue will be updated during computation int currentqsize = q.size(), i = 0; // taking and untaking items for each node in queue while (i < currentqsize) { KnapsackSolution solutionTakeitem = std::move(q.front()); q.pop_front(); KnapsackSolution solutionUntakeitem = KnapsackSolution(inst); solutionUntakeitem.Copy(&solutionTakeitem); solutionTakeitem.Take(k); solutionUntakeitem.DontTake(k); q.push_back(std::move(solutionTakeitem)); q.push_back(std::move(solutionUntakeitem)); ++i; } } void KnapsackBFSolver::bound(int k, std::deque<KnapsackSolution>& q, KnapsackSolution& best) { int qSize = q.size(); for (int i = 0; i < qSize; ++i) { auto soln = std::move(q.front()); q.pop_front(); // Update bounds soln.update_bounds(); // See if it is better than the best, if it is, update best if (soln.getlbValue() > best.getlbValue()) { best.Copy(&soln); } q.push_back(std::move(soln)); } if (k < best.getslackItem()) { // Means the solution was not finished for (int i = k + 1; i < 
best.getslackItem(); ++i) { best.Take(i); } // Take any other items that might fit after slack items int w = inst->GetCapacity() - best.getbeforeSlackWeight(); for (int i = best.getslackItem() + 1; i < inst->GetItemCnt(); ++i) { if (w >= inst->GetItemWeight(i)) { best.Take(i); w -= inst->GetItemWeight(i); } } } } void KnapsackBFSolver::prune(int k, std::deque<KnapsackSolution>& q, KnapsackSolution& best) { int qSize = q.size(); std::deque<KnapsackSolution> historyTable; for (int i = 0; i < qSize; ++i) { auto soln = std::move(q.front()); q.pop_front(); int soln_ub_value = soln.getubValue(); int soln_before_slack_value = soln.getbeforeSlackValue(); int soln_before_slack_weight = soln.getbeforeSlackWeight(); if (soln_ub_value > best.getlbValue())/*If it is a valid solution*/ { if (historyTable.size() == 0) { historyTable.push_back(std::move(soln)); } else { /*comparing with the last element and placing at the last*/ auto last = historyTable.at(historyTable.size() - 1); if (last.getbeforeSlackWeight() < soln_before_slack_weight) { if (last.getbeforeSlackValue() >= soln_before_slack_value) { continue; } else { historyTable.push_back(std::move(soln)); } } else { /*checking if we can place at the beginning of the table*/ auto first = historyTable.at(0); if (first.getbeforeSlackWeight() > soln_before_slack_weight) { if (soln_before_slack_value < first.getbeforeSlackValue()) { historyTable.push_front(std::move(soln)); } else { int index = 0; while (index < historyTable.size() && historyTable.at(index).getbeforeSlackValue() <= soln_before_slack_value) { index++; } historyTable.erase(historyTable.begin(), historyTable.begin() + index - 1); historyTable.push_front(std::move(soln)); } } else { int n = node_insert(historyTable, soln_before_slack_weight); if (historyTable.at(n).getbeforeSlackWeight() == soln_before_slack_weight) { auto equal = std::move(historyTable.at(n)); auto equal_bsv = equal.getbeforeSlackValue(); if (soln_before_slack_value <= equal_bsv) { std::swap(equal, historyTable.at(n)); continue; } else { /*std::unique_ptr<bfs_solution> m; std::swap(m, historyTable.at(mid));*/ equal.Copy(&soln); std::swap(equal, historyTable.at(n)); int index = n + 1; while (index < historyTable.size() && historyTable.at(index).getbeforeSlackValue() <= soln_before_slack_value) { index++; } //historyTable.erase(historyTable.begin() + n + 1, historyTable.begin() + index - 1); if ((index - n) == 1) { } else { historyTable.erase(historyTable.begin() + n + 1, historyTable.begin() + index - 1); } } } else { auto less = std::move(historyTable.at(n - 1)); auto less_bsv = less.getbeforeSlackValue(); if (soln_before_slack_value <= less_bsv) { std::swap(less, historyTable.at(n - 1)); continue; } else { auto high = std::move(historyTable.at(n)); auto high_bsv = high.getbeforeSlackValue(); if (soln_before_slack_value >= high_bsv) { high.Copy(&soln); std::swap(high, historyTable.at(n)); int index = n + 1; while (index < historyTable.size() && historyTable.at(index).getbeforeSlackValue() <= soln_before_slack_value) { index++; } //historyTable.erase(historyTable.begin() + n, historyTable.begin() + index - 1); if ((index - n) == 1) { } else { historyTable.erase(historyTable.begin() + n + 1, historyTable.begin() + index - 1); } } else { historyTable.insert(historyTable.begin() + n, std::move(soln)); } } } } } } } } q = historyTable; } int KnapsackBFSolver::node_insert(std::deque<KnapsackSolution>& historyTable, int weight) { int lowerBound = 0; int upperBound = historyTable.size() - 1; int curIn = 0; while (true) { curIn = 
(upperBound + lowerBound) / 2; if (historyTable.at(curIn).getbeforeSlackWeight() == weight) { return curIn; } else if (historyTable.at(curIn).getbeforeSlackWeight() < weight) { lowerBound = curIn + 1; // its in the upper if (lowerBound > upperBound) return curIn + 1; } else { upperBound = curIn - 1; // its in the lower if (lowerBound > upperBound) return curIn; } } } hipError_t KnapsackBFSolver::initialize_memory_pool() { // Determine how much free space we have and allocate as much as possible size_t free, total; hipMemGetInfo(&free, &total); auto size = static_cast<size_t>(static_cast<double>(free - 1)*.985 + 1) / sizeof(int); auto intSize = sizeof(int); #ifdef DEBUG_MODE_GPU std::cout << "Free: " << free << ", Total: " << total << std::endl; std::cout << "Allocating " << size << " ints" << std::endl; #endif auto err = hipMalloc(&md_rawData, size * sizeof(int)); if (err != hipSuccess) { std::cerr << "Failed to allocate device memory pool for algorithm, aborting..."; goto Error; } // Allocate same pool on the host err = hipHostMalloc(&mh_rawData, size * sizeof(int)); if (err != hipSuccess) { std::cerr << "Failed to allocate host memory pool for algorithm, aborting..."; goto Error; } // Calculate how much memory each part will take size_t ksSize = inst->GetItemCnt(); size_t solVectorSize = ((ksSize + 1) * sizeof(bool) / sizeof(int) + 1) * sizeof(int); size_t perNodeValueSize = 6 * sizeof(int); size_t perNodeSize = solVectorSize + perNodeValueSize; // Split memory by percentage of per space size auto valuePercent = static_cast<double>(perNodeValueSize) / static_cast<double>(perNodeSize); auto solVectorPercent = static_cast<double>(solVectorSize) / static_cast<double>(perNodeSize); size_t bytesForValues = valuePercent * (size - 2) * sizeof(int); size_t bytesForSol = solVectorPercent * (size - 2) * sizeof(int); // How many nodes can we store in each? size_t nodesForValues = bytesForValues / (6 * sizeof(int)); size_t nodesForSol = bytesForSol / solVectorSize; size_t intsPerValue = bytesForValues / 6 / sizeof(int); // Initialze device sizes md_vectorSizes = 0; md_solutionVectorSize = 0; md_nodeLimit = (nodesForValues < nodesForSol ? 
nodesForValues : nodesForSol); // Initailze pointers for device md_beforeSlackValues = static_cast<int*>(md_rawData); md_beforeSlackWeights = static_cast<int*>(md_rawData) + intsPerValue; md_slackItems = static_cast<int*>(md_rawData) + intsPerValue * 2; md_lbValues = static_cast<int*>(md_rawData) + intsPerValue * 3; md_ubValues = static_cast<int*>(md_rawData) + intsPerValue * 4; md_labelTable = static_cast<int*>(md_rawData) + intsPerValue * 5; md_solutionVectors = reinterpret_cast<bool*>(static_cast<int*>(md_rawData) + intsPerValue * 6); md_bestLbValue = static_cast<int*>(static_cast<int*>(md_rawData) + (size - 1)); md_bestIndex = static_cast<int*>(static_cast<int*>(md_rawData) + (size - 2)); #ifdef DEBUG_MODE_GPU md_endBeforeSlackValues = static_cast<int*>(md_rawData) + intsPerValue; md_endBeforeSlackWeights = static_cast<int*>(md_rawData) + intsPerValue * 2; md_endSlackItems = static_cast<int*>(md_rawData) + intsPerValue * 3; md_endLbValues = static_cast<int*>(md_rawData) + intsPerValue * 4; md_endUbValues = static_cast<int*>(md_rawData) + intsPerValue * 5; md_endLabelTable = static_cast<int*>(md_rawData) + intsPerValue * 6; md_endSolutionVectors = reinterpret_cast<bool*>(static_cast<int*>(md_rawData) + intsPerValue * 6 + (bytesForSol / sizeof(int) - 2)); assert(static_cast<void*>(md_endSolutionVectors) <= md_bestIndex); #endif // Test values auto endSolVector = reinterpret_cast<int*>(md_solutionVectors) + bytesForSol; auto endRawData = static_cast<int*>(md_rawData) + size; // Initialize pointers for host mh_beforeSlackValues = static_cast<int*>(mh_rawData); mh_beforeSlackWeights = static_cast<int*>(mh_rawData) + intsPerValue; mh_slackItems = static_cast<int*>(mh_rawData) + intsPerValue * 2; mh_lbValues = static_cast<int*>(mh_rawData) + intsPerValue * 3; mh_ubValues = static_cast<int*>(mh_rawData) + intsPerValue * 4; mh_labelTable = static_cast<int*>(mh_rawData) + intsPerValue * 5; mh_solutionVectors = reinterpret_cast<bool*>(static_cast<int*>(mh_rawData) + intsPerValue * 6); mh_bestLbValue = static_cast<int*>(static_cast<int*>(mh_rawData) + (size - 1)); mh_bestIndex = static_cast<int*>(static_cast<int*>(mh_rawData) + (size - 1)); #ifdef DEBUG_MODE_GPU std::cout << "Nodes for values: " << nodesForValues << " Nodes for Solution Vectors: " << nodesForSol << std::endl; #endif Error: if (err != hipSuccess) { if (md_rawData != nullptr) { hipFree(md_rawData); } if (mh_rawData != nullptr) { hipHostFree(mh_rawData); } throw std::runtime_error("Failed to allocate memory pool"); } return err; } /*****************************************************************************/
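// Illustrative sketch, not part of the original file: node_insert() above is a lower-bound
// style binary search over the history table, which prune() keeps sorted by beforeSlackWeight.
// The helper below (find_insert_pos, a hypothetical name) shows the same search in isolation.
#include <cstdio>
#include <vector>

static int find_insert_pos(const std::vector<int>& weights, int w) {
    int lo = 0, hi = static_cast<int>(weights.size()) - 1;
    while (lo <= hi) {
        int mid = (lo + hi) / 2;
        if (weights[mid] == w) return mid;   // exact weight match: the caller then compares values
        if (weights[mid] < w) lo = mid + 1;  // insertion point lies to the right of mid
        else hi = mid - 1;                   // insertion point lies to the left of mid
    }
    return lo;                               // first index whose weight is >= w
}

int main() {
    std::vector<int> table = {3, 7, 12, 20};
    std::printf("insert weight 10 at index %d\n", find_insert_pos(table, 10)); // prints 2
    return 0;
}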
0f7bdfbc780dbdd8c72410430edbf6517cdbf5be.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/timeb.h> #include <string.h> #include <string> #include <deque> #include <vector> #include <algorithm> #include "knapsack.cuh" #include <iostream> #include <thrust/device_vector.h> #include "kernels.cuh" #include <thrust/system/cuda/experimental/pinned_allocator.h> #include <cassert> #define TIMEB struct timeb #define FTIME ftime #define UDT_TIME long #define MAX_SIZE_TO_PRINT 10 UDT_TIME gRefTime = 0; UDT_TIME GetMilliSecondTime(TIMEB timeBuf); void SetTime(void); UDT_TIME GetTime(void); int main(int argc, char* argv[]) { UDT_TIME time; int itemCnt; KnapsackInstance* inst; //a Knapsack instance object KnapsackDPSolver DPSolver; KnapsackBFSolver BFSolver; //brute-force solver KnapsackSolution *BFSoln; if (argc != 2) { printf("Invalid Number of command-line arguments\n"); exit(1); } itemCnt = atoi(argv[1]); if (itemCnt < 1) { printf("Invalid number of items\n"); exit(1); } // Creating Knapsack and solution object inst = new KnapsackInstance(itemCnt); BFSoln = new KnapsackSolution(inst); // Item Generation inside knapsack inst->Generate(); // Print knapsack items inst->Print(); SetTime(); DPSolver.Solve(inst, BFSoln); time = GetTime(); printf("\n\nSolved using dynamic programming (DP) in %ld ms. Optimal value = %d", time, BFSoln->getDPValue()); if (itemCnt <= MAX_SIZE_TO_PRINT) BFSoln->Print("Dynamic Programming Solution"); SetTime(); BFSolver.Solve(inst, BFSoln); printf("\n\nSolved using brute-force enumeration (BF) in %ld ms. Optimal value = %d", time, BFSoln->getlbValue()); if (itemCnt <= MAX_SIZE_TO_PRINT) BFSoln->Print("Brute-Force Solution"); delete inst; delete BFSoln; printf("\n\nProgram Completed Successfully\n"); return 0; } /********************************************************************/ UDT_TIME GetCurrentTime(void) { UDT_TIME crntTime = 0; TIMEB timeBuf; FTIME(&timeBuf); crntTime = GetMilliSecondTime(timeBuf); return crntTime; } /********************************************************************/ void SetTime(void) { gRefTime = GetCurrentTime(); } /********************************************************************/ UDT_TIME GetTime(void) { UDT_TIME crntTime = GetCurrentTime(); return (crntTime - gRefTime); } /********************************************************************/ UDT_TIME GetMilliSecondTime(TIMEB timeBuf) { UDT_TIME mliScndTime; mliScndTime = timeBuf.time; mliScndTime *= 1000; mliScndTime += timeBuf.millitm; return mliScndTime; } class KnapsackItem { public: int weight; int value; float valueperweight; KnapsackItem(int wght, int val) { weight = wght; value = val; valueperweight = (float)val / (float)wght; } }; void KnapsackDPSolver::Solve(KnapsackInstance* inst_, KnapsackSolution* soln_) { inst = inst_; soln = soln_; int n = inst->GetItemCnt(); int c = inst->GetCapacity(); int** a; a = new int*[n + 1]; for (int i = 0; i < n + 1; i++) { a[i] = new int[c + 1]; } for (int j = 0; j <= c; j++) a[0][j] = 0; for (int i = 1; i <= n; i++) { for (int j = 0; j <= c; j++) { if (inst->GetItemWeight(i) > j) a[i][j] = a[i - 1][j]; else a[i][j] = std::max(inst->GetItemValue(i) + a[i - 1][j - inst->GetItemWeight(i)], a[i - 1][j]); } } soln->setDPValue(a[n][c]); int j = c; for (int i = n; i >= 1; i--) { if (a[i][j] > a[i - 1][j]) { printf("%d ", i); j -= inst->GetItemWeight(i); } } delete a; } void KnapsackSolution::setDPValue(int val) { dpvalue = val; } int KnapsackSolution::getDPValue() { return dpvalue; } KnapsackInstance::KnapsackInstance(int itemCnt_) { itemCnt = itemCnt_; weights = new 
int[itemCnt + 1]; values = new int[itemCnt + 1]; cap = 0; } /********************************************************************/ KnapsackInstance::~KnapsackInstance() { delete[] weights; delete[] values; } /********************************************************************/ void KnapsackInstance::Generate() { int i, wghtSum = 0; std::vector<KnapsackItem> items; weights[0] = 0; values[0] = 0; for (i = 1; i <= itemCnt; i++) { weights[i] = rand() % 100 + 1; values[i] = weights[i] + 10; items.push_back(KnapsackItem(weights[i], values[i])); wghtSum += weights[i]; } std::sort(items.begin(), items.end(), [](KnapsackItem const & a, KnapsackItem const & b) -> bool { return a.valueperweight > b.valueperweight; }); printf("Number of items = %d, Capacity = %d\n", itemCnt, cap); printf("Weights: "); for (i = 1; i <= itemCnt; i++) { printf("%d ", weights[i]); } printf("\nValues: "); for (i = 1; i <= itemCnt; i++) { printf("%d ", values[i]); } printf("\n"); i = 1; for (auto itm = items.begin(); itm != items.end(); itm++, i++) { weights[i] = itm->weight; values[i] = itm->value; printf("%7.2f ", itm->valueperweight); } cap = wghtSum / 2; } void KnapsackInstance::GenerateManually() { int i, wghtSum; /*weights[0] = 0; values[0] = 0; weights[1] = 5; values[1] = 55; weights[2] = 2; values[2] = 20; weights[3] = 7; values[3] = 63; weights[4] = 8; values[4] = 64; weights[5] = 13; values[5] = 91; weights[6] = 14; values[6] = 84; weights[7] = 25; values[7] = 125; weights[8] = 20; values[8] = 80; weights[9] = 2; values[9] = 6; weights[10] = 19; values[10] = 38;*/ wghtSum = 0; for (i = 1; i <= itemCnt; i++) { /*weights[i] = rand() % 100 + 1; values[i] = weights[i] + 10;*/ wghtSum += weights[i]; } cap = wghtSum / 2; //cap = 10; } /********************************************************************/ int KnapsackInstance::GetItemCnt() { return itemCnt; } /********************************************************************/ int KnapsackInstance::GetItemWeight(int itemNum) { return weights[itemNum]; } /********************************************************************/ int KnapsackInstance::GetItemValue(int itemNum) { return values[itemNum]; } /********************************************************************/ int KnapsackInstance::GetCapacity() { return cap; } /********************************************************************/ const int* KnapsackInstance::weight_ptr() { return weights; } const int* KnapsackInstance::value_ptr() { return values; } void KnapsackInstance::Print() { int i; printf("\nAfter Sorting\n"); printf("Number of items = %d, Capacity = %d\n", itemCnt, cap); printf("Weights: "); for (i = 1; i <= itemCnt; i++) { printf("%d ", weights[i]); } printf("\nValues: "); for (i = 1; i <= itemCnt; i++) { printf("%d ", values[i]); } printf("\n"); } /*****************************************************************************/ KnapsackSolution::KnapsackSolution(KnapsackInstance* inst_) { int i, itemCnt = inst_->GetItemCnt(); beforeSlackWeight = 0; beforeSlackValue = 0; lbValue = 0; slackItem = 1; inst = inst_; isTaken = new bool[itemCnt + 1]; for (i = 1; i <= itemCnt; i++) { isTaken[i] = false; } } /********************************************************************/ KnapsackSolution::~KnapsackSolution() { //delete [] isTaken; } /********************************************************************/ //bool KnapsackSolution::operator == (KnapsackSolution& otherSoln) //{ // return value == otherSoln.value; //} /********************************************************************/ void 
KnapsackSolution::Take(int itemNum) { if (slackItem > itemNum) { isTaken[itemNum] = true; } } void KnapsackSolution::DontTake(int itemNum) { isTaken[itemNum] = false; if (slackItem > itemNum) { beforeSlackWeight -= inst->GetItemWeight(itemNum); beforeSlackValue -= inst->GetItemValue(itemNum); } else { slackItem++; } } int KnapsackSolution::getbeforeSlackWeight() { return beforeSlackWeight; } int KnapsackSolution::getbeforeSlackValue() { return beforeSlackValue; } int KnapsackSolution::getslackItem() { return slackItem; } int KnapsackSolution::getlbValue() { return lbValue; } void KnapsackSolution::Copy(KnapsackSolution* otherSoln) { int i, itemCnt = inst->GetItemCnt(); for (i = 1; i <= itemCnt; i++) { isTaken[i] = otherSoln->isTaken[i]; } //value = otherSoln->value; beforeSlackValue = otherSoln->getbeforeSlackValue(); beforeSlackWeight = otherSoln->getbeforeSlackWeight(); slackItem = otherSoln->getslackItem(); lbValue = otherSoln->getlbValue(); } /********************************************************************/ void KnapsackSolution::Print(std::string title) { int i, itemCnt = inst->GetItemCnt(); printf("\n%s: ", title.c_str()); for (i = 1; i <= itemCnt; i++) { if (isTaken[i] == true) printf("%d ", i); } //printf("\nValue = %d\n",value); } /*****************************************************************************/ KnapsackBFSolver::KnapsackBFSolver() { crntSoln = NULL; } /********************************************************************/ KnapsackBFSolver::~KnapsackBFSolver() { if (crntSoln != NULL) delete crntSoln; } /********************************************************************/ void KnapsackBFSolver::Solve(KnapsackInstance* inst_, KnapsackSolution* soln_) { //saumya inst = inst_; bestSoln = soln_; int level = 0; std::deque <KnapsackSolution> solq; crntSoln = new KnapsackSolution(inst); crntSoln->update_bounds(); solq.push_back(*crntSoln); // Transferring memory to GPU transfer_knapsack(inst); initialize_memory_pool(); while (++level <= inst->GetItemCnt() && (!solq.empty() || md_vectorSizes > 0)) { if (solq.size() > GPU_SWITCH_THRESH && md_vectorSizes == 0) { #ifdef DEBUG_MODE_GPU std::cout << "Branching on GPU, Level: " << k << std::endl; #endif transfer_to_gpu(solq); *mh_bestLbValue = bestSoln->getlbValue(); } if (md_vectorSizes > 0) { #ifdef DEBUG_MODE_GPU std::cout << "-----------------------" << "Now on level: " << k << "--------------------" << std::endl; #endif branch_gpu(level); //bound_gpu(k); //historyBasedApproach(k); //int bestIndex = find_best_lb_index_gpu(k); //label_nodes_gpu(); //concatenate_lists_cpu_gpu(bestIndex); } else { branch(level, solq); bound(level, solq, *bestSoln); prune(level, solq, *bestSoln); } // Transfer from the GPU only if we have nodes on the GPU and qsize is 0 and we're under the threshold if (md_vectorSizes > 0 && md_vectorSizes < GPU_SWITCH_THRESH && solq.size() == 0) { #ifdef DEBUG_MODE_GPU std::cout << "Transfering from GPU, Level: " << k << std::endl; #endif //transfer_from_gpu(q, best, k); } } // This is if the initial solution is the best solution... 
if (level < bestSoln->getslackItem()) { // Means the solution was not finished for (int i = level; i < bestSoln->getslackItem(); ++i) { bestSoln->Take(i); } // Take any other items that might fit after slack items int w = inst->GetCapacity() - bestSoln->getbeforeSlackWeight(); for (int i = bestSoln->getslackItem(); i <inst->GetItemCnt(); ++i) { if (w >= inst->GetItemWeight(i)) { bestSoln->Take(i); w -= inst->GetItemWeight(i); } } } //auto soln = best->solution(); //assert(soln->value() == best->lb_value()); //return soln; /*while (++level <= inst->GetItemCnt()) { branch(level, solq); bound(level, solq, *bestSoln); prune(level, solq, *bestSoln); } int i = 0; while (i < solq.size()) { auto sol = std::move(solq.at(i)); i++; delete sol.isTaken; }*/ } /********************************************************************/ cudaError_t KnapsackBFSolver::transfer_to_gpu(std::deque<KnapsackSolution>& q) { md_vectorSizes = q.size() * 2; // Create solutions vectors as one flat array with a stride of knapsack size between levels md_solutionVectorSize = q.size() * (inst->GetItemCnt() + 1) * 2; if (md_vectorSizes > md_nodeLimit) { std::cerr << "Not enough device memory... Aborting..."; throw std::runtime_error("Out of memory, GPU"); } // Transfer values into host vectors size_t qSize = q.size(); // Push back solution vectors now for (int i = 0; i < qSize; ++i) { auto node = std::move(q.front()); q.pop_front(); mh_beforeSlackValues[i] = node.getbeforeSlackValue(); mh_beforeSlackWeights[i] = node.getbeforeSlackWeight(); mh_lbValues[i] = node.getlbValue(); mh_ubValues[i] = node.getubValue(); mh_slackItems[i] = node.getslackItem(); for (int j = 1; j <= inst->GetItemCnt(); ++j) { mh_solutionVectors[i * (inst->GetItemCnt() + 1) + j] = node.isTaken[j]; } } auto err = cudaMemcpy(&md_ubValues[0], &mh_ubValues[0], sizeof(int) * md_vectorSizes, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cerr << " Failed to Copy UB Values from Host to Device" << std::endl; goto Error; } err = cudaMemcpy(&md_slackItems[0], &mh_slackItems[0], sizeof(int) * md_vectorSizes, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cerr << " Failed to Copy Slack Items from Host to Device" << std::endl; goto Error; } err = cudaMemcpy(&md_beforeSlackValues[0], &mh_beforeSlackValues[0], sizeof(int) * md_vectorSizes, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cerr << " Failed to Copy Before Slack Values from Host to Device" << std::endl; goto Error; } err = cudaMemcpy(&md_beforeSlackWeights[0], &mh_beforeSlackWeights[0], sizeof(int) * md_vectorSizes, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cerr << " Failed to Copy Before Slack Weights from Host to Device" << std::endl; goto Error; } err = cudaMemcpy(&md_solutionVectors[0], &mh_solutionVectors[0], sizeof(bool) * md_solutionVectorSize, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cerr << " Failed to Copy Solution Vector from Host to Device" << std::endl; goto Error; } err = cudaMemcpy(&md_lbValues[0], &mh_lbValues[0], sizeof(int) * md_vectorSizes, cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cerr << " Failed to Copy LB Values from Host to Device" << std::endl; goto Error; } Error: if (err != cudaSuccess) { // Now see what memory got allocated and dealloc that free_gpu_memory(); throw std::runtime_error("Failed to copy host memory"); } return err; } void KnapsackBFSolver::branch_gpu(int k) { // Create grid for branch kernel, depends on size being halved so that it can split between the two different sections of memory int gridX = 
((md_vectorSizes / 2 - 1) / BRANCH_BLOCK_SIZE) + 1; dim3 gridSize(gridX, 1, 1); dim3 blockSize(BRANCH_BLOCK_SIZE, 1, 1); // Launch branch kernel branch_kernel <<<gridSize, blockSize >> > (md_beforeSlackWeights, md_beforeSlackValues, md_slackItems, md_ubValues, md_solutionVectors, k, md_vectorSizes / 2); } void KnapsackBFSolver::free_gpu_memory() { if (md_rawData != nullptr) { cudaFree(md_rawData); } if (mh_rawData != nullptr) { cudaFreeHost(mh_rawData); } // Set all pointers to nullptr md_rawData = nullptr; mh_rawData = nullptr; md_beforeSlackValues = nullptr; mh_beforeSlackValues = nullptr; md_beforeSlackWeights = nullptr; mh_beforeSlackWeights = nullptr; md_slackItems = nullptr; mh_slackItems = nullptr; md_lbValues = nullptr; mh_lbValues = nullptr; md_ubValues = nullptr; mh_ubValues = nullptr; md_labelTable = nullptr; mh_labelTable = nullptr; md_solutionVectors = nullptr; mh_solutionVectors = nullptr; md_vectorSizes = 0; md_solutionVectorSize = 0; } void KnapsackSolution::update_bounds() { // If the node has been labeled as non-promising, dont update bounds if (beforeSlackValue < 0) { return; } while (slackItem <= inst->GetItemCnt()) { int itemWght = inst->GetItemWeight(slackItem); int itemVal = inst->GetItemValue(slackItem); if (beforeSlackWeight + itemWght <= inst->GetCapacity()) { // We can fit this item in completely beforeSlackWeight += itemWght; beforeSlackValue += itemVal; // See if we can try to fit any more items from the knapsack slackItem++; } else { break; } } int lbItemIndex = slackItem + 1; lbValue = beforeSlackValue; int lbWeight = beforeSlackWeight; while (lbWeight < inst->GetCapacity() && lbItemIndex <= inst->GetItemCnt()) { // See if we can fit the item into the lower bound solution if (inst->GetItemWeight(lbItemIndex) <= (inst->GetCapacity() - lbWeight)) { lbWeight += inst->GetItemWeight(lbItemIndex); lbValue += inst->GetItemValue(lbItemIndex); } lbItemIndex++; } } int KnapsackSolution::getubValue() const { // This means that we have taken upto and not including slack item, so we // Dont need to search down this subtree anymore, prune this node if (beforeSlackValue < 0) { return INVALID_VALUE; } if (slackItem <= inst->GetItemCnt()) { int residualCapacity = inst->GetCapacity() - beforeSlackWeight; float p = static_cast<float>(residualCapacity)*static_cast<float>(inst->GetItemValue(slackItem)) / static_cast<float>(inst->GetItemWeight(slackItem)); return beforeSlackValue + p; } return static_cast<int>(beforeSlackValue); } void KnapsackBFSolver::branch(int k, std::deque<KnapsackSolution>& q) { //saving queue size since queue will be updated during computation int currentqsize = q.size(), i = 0; // taking and untaking items for each node in queue while (i < currentqsize) { KnapsackSolution solutionTakeitem = std::move(q.front()); q.pop_front(); KnapsackSolution solutionUntakeitem = KnapsackSolution(inst); solutionUntakeitem.Copy(&solutionTakeitem); solutionTakeitem.Take(k); solutionUntakeitem.DontTake(k); q.push_back(std::move(solutionTakeitem)); q.push_back(std::move(solutionUntakeitem)); ++i; } } void KnapsackBFSolver::bound(int k, std::deque<KnapsackSolution>& q, KnapsackSolution& best) { int qSize = q.size(); for (int i = 0; i < qSize; ++i) { auto soln = std::move(q.front()); q.pop_front(); // Update bounds soln.update_bounds(); // See if it is better than the best, if it is, update best if (soln.getlbValue() > best.getlbValue()) { best.Copy(&soln); } q.push_back(std::move(soln)); } if (k < best.getslackItem()) { // Means the solution was not finished for (int i = k + 
1; i < best.getslackItem(); ++i) { best.Take(i); } // Take any other items that might fit after slack items int w = inst->GetCapacity() - best.getbeforeSlackWeight(); for (int i = best.getslackItem() + 1; i < inst->GetItemCnt(); ++i) { if (w >= inst->GetItemWeight(i)) { best.Take(i); w -= inst->GetItemWeight(i); } } } } void KnapsackBFSolver::prune(int k, std::deque<KnapsackSolution>& q, KnapsackSolution& best) { int qSize = q.size(); std::deque<KnapsackSolution> historyTable; for (int i = 0; i < qSize; ++i) { auto soln = std::move(q.front()); q.pop_front(); int soln_ub_value = soln.getubValue(); int soln_before_slack_value = soln.getbeforeSlackValue(); int soln_before_slack_weight = soln.getbeforeSlackWeight(); if (soln_ub_value > best.getlbValue())/*If it is a valid solution*/ { if (historyTable.size() == 0) { historyTable.push_back(std::move(soln)); } else { /*comparing with the last element and placing at the last*/ auto last = historyTable.at(historyTable.size() - 1); if (last.getbeforeSlackWeight() < soln_before_slack_weight) { if (last.getbeforeSlackValue() >= soln_before_slack_value) { continue; } else { historyTable.push_back(std::move(soln)); } } else { /*checking if we can place at the beginning of the table*/ auto first = historyTable.at(0); if (first.getbeforeSlackWeight() > soln_before_slack_weight) { if (soln_before_slack_value < first.getbeforeSlackValue()) { historyTable.push_front(std::move(soln)); } else { int index = 0; while (index < historyTable.size() && historyTable.at(index).getbeforeSlackValue() <= soln_before_slack_value) { index++; } historyTable.erase(historyTable.begin(), historyTable.begin() + index - 1); historyTable.push_front(std::move(soln)); } } else { int n = node_insert(historyTable, soln_before_slack_weight); if (historyTable.at(n).getbeforeSlackWeight() == soln_before_slack_weight) { auto equal = std::move(historyTable.at(n)); auto equal_bsv = equal.getbeforeSlackValue(); if (soln_before_slack_value <= equal_bsv) { std::swap(equal, historyTable.at(n)); continue; } else { /*std::unique_ptr<bfs_solution> m; std::swap(m, historyTable.at(mid));*/ equal.Copy(&soln); std::swap(equal, historyTable.at(n)); int index = n + 1; while (index < historyTable.size() && historyTable.at(index).getbeforeSlackValue() <= soln_before_slack_value) { index++; } //historyTable.erase(historyTable.begin() + n + 1, historyTable.begin() + index - 1); if ((index - n) == 1) { } else { historyTable.erase(historyTable.begin() + n + 1, historyTable.begin() + index - 1); } } } else { auto less = std::move(historyTable.at(n - 1)); auto less_bsv = less.getbeforeSlackValue(); if (soln_before_slack_value <= less_bsv) { std::swap(less, historyTable.at(n - 1)); continue; } else { auto high = std::move(historyTable.at(n)); auto high_bsv = high.getbeforeSlackValue(); if (soln_before_slack_value >= high_bsv) { high.Copy(&soln); std::swap(high, historyTable.at(n)); int index = n + 1; while (index < historyTable.size() && historyTable.at(index).getbeforeSlackValue() <= soln_before_slack_value) { index++; } //historyTable.erase(historyTable.begin() + n, historyTable.begin() + index - 1); if ((index - n) == 1) { } else { historyTable.erase(historyTable.begin() + n + 1, historyTable.begin() + index - 1); } } else { historyTable.insert(historyTable.begin() + n, std::move(soln)); } } } } } } } } q = historyTable; } int KnapsackBFSolver::node_insert(std::deque<KnapsackSolution>& historyTable, int weight) { int lowerBound = 0; int upperBound = historyTable.size() - 1; int curIn = 0; while (true) { 
curIn = (upperBound + lowerBound) / 2; if (historyTable.at(curIn).getbeforeSlackWeight() == weight) { return curIn; } else if (historyTable.at(curIn).getbeforeSlackWeight() < weight) { lowerBound = curIn + 1; // its in the upper if (lowerBound > upperBound) return curIn + 1; } else { upperBound = curIn - 1; // its in the lower if (lowerBound > upperBound) return curIn; } } } cudaError_t KnapsackBFSolver::initialize_memory_pool() { // Determine how much free space we have and allocate as much as possible size_t free, total; cudaMemGetInfo(&free, &total); auto size = static_cast<size_t>(static_cast<double>(free - 1)*.985 + 1) / sizeof(int); auto intSize = sizeof(int); #ifdef DEBUG_MODE_GPU std::cout << "Free: " << free << ", Total: " << total << std::endl; std::cout << "Allocating " << size << " ints" << std::endl; #endif auto err = cudaMalloc(&md_rawData, size * sizeof(int)); if (err != cudaSuccess) { std::cerr << "Failed to allocate device memory pool for algorithm, aborting..."; goto Error; } // Allocate same pool on the host err = cudaMallocHost(&mh_rawData, size * sizeof(int)); if (err != cudaSuccess) { std::cerr << "Failed to allocate host memory pool for algorithm, aborting..."; goto Error; } // Calculate how much memory each part will take size_t ksSize = inst->GetItemCnt(); size_t solVectorSize = ((ksSize + 1) * sizeof(bool) / sizeof(int) + 1) * sizeof(int); size_t perNodeValueSize = 6 * sizeof(int); size_t perNodeSize = solVectorSize + perNodeValueSize; // Split memory by percentage of per space size auto valuePercent = static_cast<double>(perNodeValueSize) / static_cast<double>(perNodeSize); auto solVectorPercent = static_cast<double>(solVectorSize) / static_cast<double>(perNodeSize); size_t bytesForValues = valuePercent * (size - 2) * sizeof(int); size_t bytesForSol = solVectorPercent * (size - 2) * sizeof(int); // How many nodes can we store in each? size_t nodesForValues = bytesForValues / (6 * sizeof(int)); size_t nodesForSol = bytesForSol / solVectorSize; size_t intsPerValue = bytesForValues / 6 / sizeof(int); // Initialze device sizes md_vectorSizes = 0; md_solutionVectorSize = 0; md_nodeLimit = (nodesForValues < nodesForSol ? 
nodesForValues : nodesForSol); // Initailze pointers for device md_beforeSlackValues = static_cast<int*>(md_rawData); md_beforeSlackWeights = static_cast<int*>(md_rawData) + intsPerValue; md_slackItems = static_cast<int*>(md_rawData) + intsPerValue * 2; md_lbValues = static_cast<int*>(md_rawData) + intsPerValue * 3; md_ubValues = static_cast<int*>(md_rawData) + intsPerValue * 4; md_labelTable = static_cast<int*>(md_rawData) + intsPerValue * 5; md_solutionVectors = reinterpret_cast<bool*>(static_cast<int*>(md_rawData) + intsPerValue * 6); md_bestLbValue = static_cast<int*>(static_cast<int*>(md_rawData) + (size - 1)); md_bestIndex = static_cast<int*>(static_cast<int*>(md_rawData) + (size - 2)); #ifdef DEBUG_MODE_GPU md_endBeforeSlackValues = static_cast<int*>(md_rawData) + intsPerValue; md_endBeforeSlackWeights = static_cast<int*>(md_rawData) + intsPerValue * 2; md_endSlackItems = static_cast<int*>(md_rawData) + intsPerValue * 3; md_endLbValues = static_cast<int*>(md_rawData) + intsPerValue * 4; md_endUbValues = static_cast<int*>(md_rawData) + intsPerValue * 5; md_endLabelTable = static_cast<int*>(md_rawData) + intsPerValue * 6; md_endSolutionVectors = reinterpret_cast<bool*>(static_cast<int*>(md_rawData) + intsPerValue * 6 + (bytesForSol / sizeof(int) - 2)); assert(static_cast<void*>(md_endSolutionVectors) <= md_bestIndex); #endif // Test values auto endSolVector = reinterpret_cast<int*>(md_solutionVectors) + bytesForSol; auto endRawData = static_cast<int*>(md_rawData) + size; // Initialize pointers for host mh_beforeSlackValues = static_cast<int*>(mh_rawData); mh_beforeSlackWeights = static_cast<int*>(mh_rawData) + intsPerValue; mh_slackItems = static_cast<int*>(mh_rawData) + intsPerValue * 2; mh_lbValues = static_cast<int*>(mh_rawData) + intsPerValue * 3; mh_ubValues = static_cast<int*>(mh_rawData) + intsPerValue * 4; mh_labelTable = static_cast<int*>(mh_rawData) + intsPerValue * 5; mh_solutionVectors = reinterpret_cast<bool*>(static_cast<int*>(mh_rawData) + intsPerValue * 6); mh_bestLbValue = static_cast<int*>(static_cast<int*>(mh_rawData) + (size - 1)); mh_bestIndex = static_cast<int*>(static_cast<int*>(mh_rawData) + (size - 1)); #ifdef DEBUG_MODE_GPU std::cout << "Nodes for values: " << nodesForValues << " Nodes for Solution Vectors: " << nodesForSol << std::endl; #endif Error: if (err != cudaSuccess) { if (md_rawData != nullptr) { cudaFree(md_rawData); } if (mh_rawData != nullptr) { cudaFreeHost(mh_rawData); } throw std::runtime_error("Failed to allocate memory pool"); } return err; } /*****************************************************************************/
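// Illustrative sketch, not part of the original file: getubValue() above computes the classic
// fractional upper bound: the value packed before the slack item plus the slack item's value
// prorated over the remaining capacity. The standalone function below (fractional_upper_bound,
// a hypothetical name) reproduces that arithmetic.
#include <cstdio>

static int fractional_upper_bound(int before_slack_value, int before_slack_weight,
                                  int capacity, int slack_weight, int slack_value) {
    int residual = capacity - before_slack_weight;                  // capacity left for the slack item
    float fraction = static_cast<float>(residual) * slack_value / slack_weight;
    return before_slack_value + static_cast<int>(fraction);         // truncated, as in getubValue()
}

int main() {
    // Packed value 60 with weight 50 so far, capacity 65; the slack item weighs 30 and is worth 45.
    std::printf("upper bound = %d\n", fractional_upper_bound(60, 50, 65, 30, 45)); // 60 + 22 = 82
    return 0;
}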
64768e4737c68a22ca03d211a9df234a59bd2e81.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "functors.hpp" #include "grid_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" #include <opencv2/core.hpp> using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, class EltwiseOp, std::size_t N> __global__ void eltwise_op_vec(Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params params) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto x_vPtr = vector_type::get_pointer(x.data()); auto y_vPtr = vector_type::get_pointer(y.data()); EltwiseOp eltwise_op(params); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec_x, vec_y; v_load(vec_x, x_vPtr[i]); v_load(vec_y, y_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) vec_x.data[j] = eltwise_op(vec_x.data[j], vec_y.data[j]); v_store(output_vPtr[i], vec_x); } } } template <class T, class EltwiseOp, std::size_t N> static void launch_vectorized_eltwise_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& params) { CV_Assert(x.size() == y.size()); CV_Assert(x.size() == output.size()); CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(x, N)); CV_Assert(is_fully_aligned<T>(y, N)); auto kernel = raw::eltwise_op_vec<T, EltwiseOp, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, x, y, params); } template <class T, class EltwiseOp> static void eltwise_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& params = {}) { CV_Assert(x.size() == y.size()); CV_Assert(x.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(x, 4) && is_fully_aligned<T>(y, 4)) { launch_vectorized_eltwise_op<T, EltwiseOp, 4>(stream, output, x, y, params); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(x, 2) && is_fully_aligned<T>(y, 2)) { launch_vectorized_eltwise_op<T, EltwiseOp, 2>(stream, output, x, y, params); } else { launch_vectorized_eltwise_op<T, EltwiseOp, 1>(stream, output, x, y, params); } } template <class T> void eltwise_max_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) { eltwise_op<T, MaxFunctor<T>>(stream, output, x, y); } template <class T> void eltwise_sum_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) { eltwise_op<T, SumFunctor<T>>(stream, output, x, y); } template <class T> void eltwise_sum_coeff_2(const Stream& stream, Span<T> output, T coeff_x, View<T> x, T coeff_y, View<T> y) { eltwise_op<T, ScaledSumFunctor<T>>(stream, output, x, y, {coeff_x, coeff_y}); } template <class T> void eltwise_prod_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) { eltwise_op<T, ProductFunctor<T>>(stream, output, x, y); } template <class T> void eltwise_div_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) { eltwise_op<T, DivFunctor<T>>(stream, output, x, y); } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void eltwise_div_2(const Stream& 
stream, Span<__half> output, View<__half> x, View<__half> y); template void eltwise_prod_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y); template void eltwise_sum_coeff_2(const Stream&, Span<__half>, __half, View<__half>, __half, View<__half>); template void eltwise_sum_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y); template void eltwise_max_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y); #endif template void eltwise_div_2(const Stream& stream, Span<float> output, View<float> x, View<float> y); template void eltwise_prod_2(const Stream& stream, Span<float> output, View<float> x, View<float> y); template void eltwise_sum_coeff_2(const Stream&, Span<float>, float, View<float>, float, View<float>); template void eltwise_sum_2(const Stream& stream, Span<float> output, View<float> x, View<float> y); template void eltwise_max_2(const Stream& stream, Span<float> output, View<float> x, View<float> y); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
64768e4737c68a22ca03d211a9df234a59bd2e81.cu
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <cuda_runtime.h> #include <cuda_fp16.h> #include "functors.hpp" #include "grid_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" #include <opencv2/core.hpp> using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, class EltwiseOp, std::size_t N> __global__ void eltwise_op_vec(Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params params) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto x_vPtr = vector_type::get_pointer(x.data()); auto y_vPtr = vector_type::get_pointer(y.data()); EltwiseOp eltwise_op(params); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec_x, vec_y; v_load(vec_x, x_vPtr[i]); v_load(vec_y, y_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) vec_x.data[j] = eltwise_op(vec_x.data[j], vec_y.data[j]); v_store(output_vPtr[i], vec_x); } } } template <class T, class EltwiseOp, std::size_t N> static void launch_vectorized_eltwise_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& params) { CV_Assert(x.size() == y.size()); CV_Assert(x.size() == output.size()); CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(x, N)); CV_Assert(is_fully_aligned<T>(y, N)); auto kernel = raw::eltwise_op_vec<T, EltwiseOp, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, x, y, params); } template <class T, class EltwiseOp> static void eltwise_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& params = {}) { CV_Assert(x.size() == y.size()); CV_Assert(x.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(x, 4) && is_fully_aligned<T>(y, 4)) { launch_vectorized_eltwise_op<T, EltwiseOp, 4>(stream, output, x, y, params); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(x, 2) && is_fully_aligned<T>(y, 2)) { launch_vectorized_eltwise_op<T, EltwiseOp, 2>(stream, output, x, y, params); } else { launch_vectorized_eltwise_op<T, EltwiseOp, 1>(stream, output, x, y, params); } } template <class T> void eltwise_max_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) { eltwise_op<T, MaxFunctor<T>>(stream, output, x, y); } template <class T> void eltwise_sum_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) { eltwise_op<T, SumFunctor<T>>(stream, output, x, y); } template <class T> void eltwise_sum_coeff_2(const Stream& stream, Span<T> output, T coeff_x, View<T> x, T coeff_y, View<T> y) { eltwise_op<T, ScaledSumFunctor<T>>(stream, output, x, y, {coeff_x, coeff_y}); } template <class T> void eltwise_prod_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) { eltwise_op<T, ProductFunctor<T>>(stream, output, x, y); } template <class T> void eltwise_div_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) { eltwise_op<T, DivFunctor<T>>(stream, output, x, y); } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void eltwise_div_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y); 
template void eltwise_prod_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y); template void eltwise_sum_coeff_2(const Stream&, Span<__half>, __half, View<__half>, __half, View<__half>); template void eltwise_sum_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y); template void eltwise_max_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y); #endif template void eltwise_div_2(const Stream& stream, Span<float> output, View<float> x, View<float> y); template void eltwise_prod_2(const Stream& stream, Span<float> output, View<float> x, View<float> y); template void eltwise_sum_coeff_2(const Stream&, Span<float>, float, View<float>, float, View<float>); template void eltwise_sum_2(const Stream& stream, Span<float> output, View<float> x, View<float> y); template void eltwise_max_2(const Stream& stream, Span<float> output, View<float> x, View<float> y); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
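// Illustrative sketch, not part of the OpenCV sources: eltwise_op() above dispatches to the
// widest vectorized kernel (4-, 2- or 1-element loads) that the buffer alignment and size allow.
// The toy below (vec_add, is_aligned, add_dispatch are hypothetical names) shows the same idea
// without the cuda4dnn CSL types.
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>

template <int N> struct vec;                           // map a vector width to a CUDA vector type
template <> struct vec<4> { using type = float4; };
template <> struct vec<2> { using type = float2; };
template <> struct vec<1> { using type = float;  };

template <int N>
__global__ void vec_add(float* out, const float* a, const float* b, size_t n_vec) {
    using V = typename vec<N>::type;
    V* o = reinterpret_cast<V*>(out);
    const V* x = reinterpret_cast<const V*>(a);
    const V* y = reinterpret_cast<const V*>(b);
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n_vec; i += gridDim.x * blockDim.x) {
        V vx = x[i], vy = y[i];                        // one vectorized load per operand
        float* px = reinterpret_cast<float*>(&vx);
        const float* py = reinterpret_cast<const float*>(&vy);
        for (int j = 0; j < N; j++) px[j] += py[j];    // scalar work inside the vector
        o[i] = vx;                                     // one vectorized store
    }
}

static bool is_aligned(const void* p, size_t bytes) {
    return reinterpret_cast<std::uintptr_t>(p) % bytes == 0;
}

static void add_dispatch(float* out, const float* a, const float* b, size_t n) {
    if (n % 4 == 0 && is_aligned(out, 16) && is_aligned(a, 16) && is_aligned(b, 16))
        vec_add<4><<<128, 256>>>(out, a, b, n / 4);
    else if (n % 2 == 0 && is_aligned(out, 8) && is_aligned(a, 8) && is_aligned(b, 8))
        vec_add<2><<<128, 256>>>(out, a, b, n / 2);
    else
        vec_add<1><<<128, 256>>>(out, a, b, n);
}

int main() {
    const size_t n = 8;
    float ha[n], hb[n], hr[n];
    for (size_t i = 0; i < n; i++) { ha[i] = float(i); hb[i] = 1.0f; }
    float *da, *db, *dout;
    cudaMalloc(&da, n * sizeof(float)); cudaMalloc(&db, n * sizeof(float)); cudaMalloc(&dout, n * sizeof(float));
    cudaMemcpy(da, ha, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, n * sizeof(float), cudaMemcpyHostToDevice);
    add_dispatch(dout, da, db, n);   // n % 4 == 0 and cudaMalloc is 256-byte aligned, so the 4-wide path runs
    cudaMemcpy(hr, dout, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < n; i++) std::printf("%g ", hr[i]);  // 1 2 3 4 5 6 7 8
    std::printf("\n");
    cudaFree(da); cudaFree(db); cudaFree(dout);
    return 0;
}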
24fa1f5374ef49ed81607f34f90d5f90ea51af41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "build.h" namespace hagrid { static __constant__ ivec3 grid_dims; static __constant__ vec3 grid_min; static __constant__ vec3 cell_size; static __constant__ vec3 grid_inv; static __constant__ int grid_shift; /// Returns true if an overlap with a neighboring cell is possible template <int axis, bool dir> __device__ bool overlap_possible(const Cell& cell) { if (dir) return get<axis>(cell.max) < get<axis>(grid_dims); else return get<axis>(cell.min) > 0; } /// Determines if the given range of references is a subset of the other __device__ __forceinline__ bool is_subset(const int* __restrict__ p0, int c0, const int* __restrict__ p1, int c1) { if (c1 > c0) return false; if (c1 == 0) return true; int i = 0, j = 0; do { const int a = p0[i]; const int b = p1[j]; if (b < a) return false; j += (a == b); i++; } while (i < c0 & j < c1); return j == c1; } /// Computes the amount of overlap possible for a cell and a given primitive template <int axis, bool dir, typename Primitive> __device__ int compute_overlap(const Primitive& prim, const Cell& cell, const BBox& cell_bbox, int d) { static constexpr int axis1 = (axis + 1) % 3; static constexpr int axis2 = (axis + 2) % 3; auto prim_bbox = prim.bbox(); if (get<axis1>(prim_bbox.min) <= get<axis1>(cell_bbox.max) && get<axis1>(prim_bbox.max) >= get<axis1>(cell_bbox.min) && get<axis2>(prim_bbox.min) <= get<axis2>(cell_bbox.max) && get<axis2>(prim_bbox.max) >= get<axis2>(cell_bbox.min)) { // Approximation: use the original bounding box, not the clipped one int prim_d = ((dir ? get<axis>(prim_bbox.min) : get<axis>(prim_bbox.max)) - get<axis>(grid_min)) * get<axis>(grid_inv); d = dir ? min(d, prim_d - get<axis>(cell.max)) : max(d, prim_d - get<axis>(cell.min) + 1); d = dir ? max(d, 0) : min(d, 0); } return d; } /// Finds the maximum overlap possible for one cell template <int axis, bool dir, bool subset_only, typename Primitive> __device__ int find_overlap(const Entry* __restrict__ entries, const int* __restrict__ refs, const Primitive* __restrict__ prims, const Cell* cells, const Cell& cell, bool& continue_overlap) { constexpr int axis1 = (axis + 1) % 3; constexpr int axis2 = (axis + 2) % 3; if (!overlap_possible<axis, dir>(cell)) return 0; int d = dir ? get<axis>(grid_dims) : -get<axis>(grid_dims); int k1, k2 = get<axis2>(grid_dims); int i = get<axis1>(cell.min); int j = get<axis2>(cell.min); int max_d = d; while (true) { ivec3 next_cell; if (axis == 0) next_cell = ivec3(dir ? cell.max.x : cell.min.x - 1, i, j); if (axis == 1) next_cell = ivec3(j, dir ? cell.max.y : cell.min.y - 1, i); if (axis == 2) next_cell = ivec3(i, j, dir ? cell.max.z : cell.min.z - 1); auto entry = lookup_entry(entries, grid_shift, grid_dims >> grid_shift, next_cell); auto next = load_cell(cells + entry); max_d = dir ? min(max_d, get<axis>(next.max) - get<axis>(cell.max)) : max(max_d, get<axis>(next.min) - get<axis>(cell.min)); d = dir ? 
min(d, max_d) : max(d, max_d); if (subset_only) { if (!is_subset(refs + cell.begin, cell.end - cell.begin, refs + next.begin, next.end - next.begin)) { d = 0; break; } } else { if (next.begin < next.end) { auto cell_bbox = BBox(grid_min + cell_size * vec3(cell.min), grid_min + cell_size * vec3(cell.max)); int p1 = cell.begin, p2 = next.begin; int ref2 = refs[p2]; while (true) { // Skip references that are present in the current cell while (p1 < cell.end) { int ref1 = refs[p1]; if (ref1 > ref2) break; if (ref1 == ref2) { if (++p2 >= next.end) break; ref2 = refs[p2]; } p1++; } if (p2 >= next.end) break; // Process references that are only present in the next cell d = compute_overlap<axis, dir>(load_prim(prims + ref2), cell, cell_bbox, d); if (d == 0 || ++p2 >= next.end) break; ref2 = refs[p2]; } } if (d == 0) break; } k1 = get<axis1>(next.max) - i; k2 = min(k2, get<axis2>(next.max) - j); i += k1; if (i >= get<axis1>(cell.max)) { i = get<axis1>(cell.min); j += k2; k2 = get<axis2>(grid_dims); if (j >= get<axis2>(cell.max)) break; } } continue_overlap |= d == max_d; return d; } template <int axis, typename Primitive> __global__ void overlap_step(const Entry* __restrict__ entries, const int* __restrict__ refs, const Primitive* __restrict__ prims, const Cell* __restrict__ cells, Cell* __restrict__ new_cells, int* __restrict__ cell_flags, int num_cells) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= num_cells || (cell_flags[id] & (1 << axis)) == 0) return; auto cell = load_cell(cells + id); bool flag = false; constexpr bool subset_only = true; auto ov1 = find_overlap<axis, false, subset_only>(entries, refs, prims, cells, cell, flag); auto ov2 = find_overlap<axis, true, subset_only>(entries, refs, prims, cells, cell, flag); if (axis == 0) { cell.min.x += ov1; cell.max.x += ov2; } if (axis == 1) { cell.min.y += ov1; cell.max.y += ov2; } if (axis == 2) { cell.min.z += ov1; cell.max.z += ov2; } // If the cell has not been expanded, we will not process it next time cell_flags[id] = (flag ? 
1 << axis : 0) | (cell_flags[id] & ~(1 << axis)); store_cell(new_cells + id, cell); } template <typename Primitive> void expansion_iter(Grid& grid, const Primitive* prims, Cell*& new_cells, int* cell_flags) { hipLaunchKernelGGL(( overlap_step<0>), dim3(round_div(grid.num_cells, 64)), dim3(64), 0, 0, grid.entries, grid.ref_ids, prims, grid.cells, new_cells, cell_flags, grid.num_cells); std::swap(new_cells, grid.cells); DEBUG_SYNC(); hipLaunchKernelGGL(( overlap_step<1>), dim3(round_div(grid.num_cells, 64)), dim3(64), 0, 0, grid.entries, grid.ref_ids, prims, grid.cells, new_cells, cell_flags, grid.num_cells); std::swap(new_cells, grid.cells); DEBUG_SYNC(); hipLaunchKernelGGL(( overlap_step<2>), dim3(round_div(grid.num_cells, 64)), dim3(64), 0, 0, grid.entries, grid.ref_ids, prims, grid.cells, new_cells, cell_flags, grid.num_cells); std::swap(new_cells, grid.cells); DEBUG_SYNC(); } template <typename Primitive> void expand(MemManager& mem, Grid& grid, const Primitive* prims, int iters) { if (iters == 0) return; auto new_cells = mem.alloc<Cell>(grid.num_cells); auto cell_flags = mem.alloc<int>(grid.num_cells); mem.one(cell_flags, grid.num_cells); auto extents = grid.bbox.extents(); auto dims = grid.dims << grid.shift; auto cell_size = extents / vec3(dims); auto grid_inv = vec3(dims) / extents; set_global(hagrid::grid_dims, dims); set_global(hagrid::grid_min, grid.bbox.min); set_global(hagrid::cell_size, cell_size); set_global(hagrid::grid_inv, grid_inv); set_global(hagrid::grid_shift, grid.shift); for (int i = 0; i < iters; i++) expansion_iter(grid, prims, new_cells, cell_flags); mem.free(cell_flags); mem.free(new_cells); } void expand_grid(MemManager& mem, Grid& grid, const Tri* tris, int iters) { expand(mem, grid, tris, iters); } } // namespace hagrid
24fa1f5374ef49ed81607f34f90d5f90ea51af41.cu
#include "build.h" namespace hagrid { static __constant__ ivec3 grid_dims; static __constant__ vec3 grid_min; static __constant__ vec3 cell_size; static __constant__ vec3 grid_inv; static __constant__ int grid_shift; /// Returns true if an overlap with a neighboring cell is possible template <int axis, bool dir> __device__ bool overlap_possible(const Cell& cell) { if (dir) return get<axis>(cell.max) < get<axis>(grid_dims); else return get<axis>(cell.min) > 0; } /// Determines if the given range of references is a subset of the other __device__ __forceinline__ bool is_subset(const int* __restrict__ p0, int c0, const int* __restrict__ p1, int c1) { if (c1 > c0) return false; if (c1 == 0) return true; int i = 0, j = 0; do { const int a = p0[i]; const int b = p1[j]; if (b < a) return false; j += (a == b); i++; } while (i < c0 & j < c1); return j == c1; } /// Computes the amount of overlap possible for a cell and a given primitive template <int axis, bool dir, typename Primitive> __device__ int compute_overlap(const Primitive& prim, const Cell& cell, const BBox& cell_bbox, int d) { static constexpr int axis1 = (axis + 1) % 3; static constexpr int axis2 = (axis + 2) % 3; auto prim_bbox = prim.bbox(); if (get<axis1>(prim_bbox.min) <= get<axis1>(cell_bbox.max) && get<axis1>(prim_bbox.max) >= get<axis1>(cell_bbox.min) && get<axis2>(prim_bbox.min) <= get<axis2>(cell_bbox.max) && get<axis2>(prim_bbox.max) >= get<axis2>(cell_bbox.min)) { // Approximation: use the original bounding box, not the clipped one int prim_d = ((dir ? get<axis>(prim_bbox.min) : get<axis>(prim_bbox.max)) - get<axis>(grid_min)) * get<axis>(grid_inv); d = dir ? min(d, prim_d - get<axis>(cell.max)) : max(d, prim_d - get<axis>(cell.min) + 1); d = dir ? max(d, 0) : min(d, 0); } return d; } /// Finds the maximum overlap possible for one cell template <int axis, bool dir, bool subset_only, typename Primitive> __device__ int find_overlap(const Entry* __restrict__ entries, const int* __restrict__ refs, const Primitive* __restrict__ prims, const Cell* cells, const Cell& cell, bool& continue_overlap) { constexpr int axis1 = (axis + 1) % 3; constexpr int axis2 = (axis + 2) % 3; if (!overlap_possible<axis, dir>(cell)) return 0; int d = dir ? get<axis>(grid_dims) : -get<axis>(grid_dims); int k1, k2 = get<axis2>(grid_dims); int i = get<axis1>(cell.min); int j = get<axis2>(cell.min); int max_d = d; while (true) { ivec3 next_cell; if (axis == 0) next_cell = ivec3(dir ? cell.max.x : cell.min.x - 1, i, j); if (axis == 1) next_cell = ivec3(j, dir ? cell.max.y : cell.min.y - 1, i); if (axis == 2) next_cell = ivec3(i, j, dir ? cell.max.z : cell.min.z - 1); auto entry = lookup_entry(entries, grid_shift, grid_dims >> grid_shift, next_cell); auto next = load_cell(cells + entry); max_d = dir ? min(max_d, get<axis>(next.max) - get<axis>(cell.max)) : max(max_d, get<axis>(next.min) - get<axis>(cell.min)); d = dir ? 
min(d, max_d) : max(d, max_d); if (subset_only) { if (!is_subset(refs + cell.begin, cell.end - cell.begin, refs + next.begin, next.end - next.begin)) { d = 0; break; } } else { if (next.begin < next.end) { auto cell_bbox = BBox(grid_min + cell_size * vec3(cell.min), grid_min + cell_size * vec3(cell.max)); int p1 = cell.begin, p2 = next.begin; int ref2 = refs[p2]; while (true) { // Skip references that are present in the current cell while (p1 < cell.end) { int ref1 = refs[p1]; if (ref1 > ref2) break; if (ref1 == ref2) { if (++p2 >= next.end) break; ref2 = refs[p2]; } p1++; } if (p2 >= next.end) break; // Process references that are only present in the next cell d = compute_overlap<axis, dir>(load_prim(prims + ref2), cell, cell_bbox, d); if (d == 0 || ++p2 >= next.end) break; ref2 = refs[p2]; } } if (d == 0) break; } k1 = get<axis1>(next.max) - i; k2 = min(k2, get<axis2>(next.max) - j); i += k1; if (i >= get<axis1>(cell.max)) { i = get<axis1>(cell.min); j += k2; k2 = get<axis2>(grid_dims); if (j >= get<axis2>(cell.max)) break; } } continue_overlap |= d == max_d; return d; } template <int axis, typename Primitive> __global__ void overlap_step(const Entry* __restrict__ entries, const int* __restrict__ refs, const Primitive* __restrict__ prims, const Cell* __restrict__ cells, Cell* __restrict__ new_cells, int* __restrict__ cell_flags, int num_cells) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= num_cells || (cell_flags[id] & (1 << axis)) == 0) return; auto cell = load_cell(cells + id); bool flag = false; constexpr bool subset_only = true; auto ov1 = find_overlap<axis, false, subset_only>(entries, refs, prims, cells, cell, flag); auto ov2 = find_overlap<axis, true, subset_only>(entries, refs, prims, cells, cell, flag); if (axis == 0) { cell.min.x += ov1; cell.max.x += ov2; } if (axis == 1) { cell.min.y += ov1; cell.max.y += ov2; } if (axis == 2) { cell.min.z += ov1; cell.max.z += ov2; } // If the cell has not been expanded, we will not process it next time cell_flags[id] = (flag ? 
1 << axis : 0) | (cell_flags[id] & ~(1 << axis)); store_cell(new_cells + id, cell); } template <typename Primitive> void expansion_iter(Grid& grid, const Primitive* prims, Cell*& new_cells, int* cell_flags) { overlap_step<0><<<round_div(grid.num_cells, 64), 64>>>(grid.entries, grid.ref_ids, prims, grid.cells, new_cells, cell_flags, grid.num_cells); std::swap(new_cells, grid.cells); DEBUG_SYNC(); overlap_step<1><<<round_div(grid.num_cells, 64), 64>>>(grid.entries, grid.ref_ids, prims, grid.cells, new_cells, cell_flags, grid.num_cells); std::swap(new_cells, grid.cells); DEBUG_SYNC(); overlap_step<2><<<round_div(grid.num_cells, 64), 64>>>(grid.entries, grid.ref_ids, prims, grid.cells, new_cells, cell_flags, grid.num_cells); std::swap(new_cells, grid.cells); DEBUG_SYNC(); } template <typename Primitive> void expand(MemManager& mem, Grid& grid, const Primitive* prims, int iters) { if (iters == 0) return; auto new_cells = mem.alloc<Cell>(grid.num_cells); auto cell_flags = mem.alloc<int>(grid.num_cells); mem.one(cell_flags, grid.num_cells); auto extents = grid.bbox.extents(); auto dims = grid.dims << grid.shift; auto cell_size = extents / vec3(dims); auto grid_inv = vec3(dims) / extents; set_global(hagrid::grid_dims, dims); set_global(hagrid::grid_min, grid.bbox.min); set_global(hagrid::cell_size, cell_size); set_global(hagrid::grid_inv, grid_inv); set_global(hagrid::grid_shift, grid.shift); for (int i = 0; i < iters; i++) expansion_iter(grid, prims, new_cells, cell_flags); mem.free(cell_flags); mem.free(new_cells); } void expand_grid(MemManager& mem, Grid& grid, const Tri* tris, int iters) { expand(mem, grid, tris, iters); } } // namespace hagrid
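// Illustrative sketch, not part of hagrid: set_global() above is a project helper that fills the
// __constant__ symbols (grid_dims, grid_min, ...). The usual mechanism behind such helpers is
// cudaMemcpyToSymbol, shown here with hypothetical symbols (k_dims, k_inv).
#include <cuda_runtime.h>
#include <cstdio>

__constant__ int3  k_dims;   // grid resolution, resident in constant memory
__constant__ float k_inv;    // reciprocal cell size along one axis

__global__ void print_constants() {
    if (blockIdx.x == 0 && threadIdx.x == 0)
        printf("dims = (%d, %d, %d), inv = %f\n", k_dims.x, k_dims.y, k_dims.z, k_inv);
}

int main() {
    int3 dims = make_int3(64, 64, 64);
    float inv = 1.0f / 64.0f;
    cudaMemcpyToSymbol(k_dims, &dims, sizeof(dims));   // host value -> __constant__ symbol
    cudaMemcpyToSymbol(k_inv, &inv, sizeof(inv));
    print_constants<<<1, 32>>>();
    cudaDeviceSynchronize();
    return 0;
}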
f367aef3e07e6d2fb8311af0c9f6f931ef1e59e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pairwise_transform.h" /* pairwise op: returns 1 when d1 > d2, else 0 */ __device__ float op(float d1,float d2,float *params) { if(d1 > d2) return 1; else return 0; } /* unary overload: pass-through */ __device__ float op(float d1,float *params) { return d1; } /* strided element-wise greater-than kernel; transform() comes from pairwise_transform.h */ extern "C" __global__ void gt_strided_float(int n,int xOffset,int yOffset, float *dx, float *dy,int incx,int incy,float *params,float *result,int incz) { transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result,incz); }
f367aef3e07e6d2fb8311af0c9f6f931ef1e59e3.cu
#include "pairwise_transform.h"

__device__ float op(float d1, float d2, float *params) {
    if (d1 > d2)
        return 1;
    else
        return 0;
}

__device__ float op(float d1, float *params) {
    return d1;
}

extern "C" __global__ void gt_strided_float(int n, int xOffset, int yOffset,
                                            float *dx, float *dy,
                                            int incx, int incy,
                                            float *params, float *result, int incz) {
    transform(n, xOffset, yOffset, dx, dy, incx, incy, params, result, incz);
}
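For context, the following is a minimal host-side sketch of how a strided pairwise kernel such as gt_strided_float above might be launched. It is not part of the original file pair: it assumes the kernel definition above is compiled into the same program (for example with nvcc -rdc=true), and the array size, launch configuration, and unit strides are illustrative assumptions only.

// Hypothetical driver for the gt_strided_float kernel declared below; all sizes,
// offsets, and strides are illustrative, not taken from the original sources.
#include <cstdio>
#include <cuda_runtime.h>

extern "C" __global__ void gt_strided_float(int n, int xOffset, int yOffset,
                                            float *dx, float *dy, int incx, int incy,
                                            float *params, float *result, int incz);

int main() {
    const int n = 1024;
    float hx[n], hy[n], hr[n];
    for (int i = 0; i < n; ++i) { hx[i] = float(i % 7); hy[i] = 3.0f; }

    // Allocate device buffers and copy the inputs over.
    float *dx, *dy, *dr;
    cudaMalloc(&dx, n * sizeof(float));
    cudaMalloc(&dy, n * sizeof(float));
    cudaMalloc(&dr, n * sizeof(float));
    cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, hy, n * sizeof(float), cudaMemcpyHostToDevice);

    // One thread per element; no offsets, unit strides, no extra parameters.
    const int threads = 256;
    const int blocks  = (n + threads - 1) / threads;
    gt_strided_float<<<blocks, threads>>>(n, 0, 0, dx, dy, 1, 1, nullptr, dr, 1);

    // result[i] is expected to be 1.0f where dx[i] > dy[i], and 0.0f otherwise.
    cudaMemcpy(hr, dr, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("result[10] = %f (expected %f)\n", hr[10], hx[10] > hy[10] ? 1.0f : 0.0f);

    cudaFree(dx); cudaFree(dy); cudaFree(dr);
    return 0;
}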
c4af64674ddb1bf2e6a3ae3d6f52e775f0277713.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_flux_calc_kernely; int xdim0_flux_calc_kernely_h = -1; __constant__ int ydim0_flux_calc_kernely; int ydim0_flux_calc_kernely_h = -1; __constant__ int xdim1_flux_calc_kernely; int xdim1_flux_calc_kernely_h = -1; __constant__ int ydim1_flux_calc_kernely; int ydim1_flux_calc_kernely_h = -1; __constant__ int xdim2_flux_calc_kernely; int xdim2_flux_calc_kernely_h = -1; __constant__ int ydim2_flux_calc_kernely; int ydim2_flux_calc_kernely_h = -1; __constant__ int xdim3_flux_calc_kernely; int xdim3_flux_calc_kernely_h = -1; __constant__ int ydim3_flux_calc_kernely; int ydim3_flux_calc_kernely_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_flux_calc_kernely*(y)+xdim0_flux_calc_kernely*ydim0_flux_calc_kernely*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_flux_calc_kernely*(y)+xdim1_flux_calc_kernely*ydim1_flux_calc_kernely*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_flux_calc_kernely*(y)+xdim2_flux_calc_kernely*ydim2_flux_calc_kernely*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_flux_calc_kernely*(y)+xdim3_flux_calc_kernely*ydim3_flux_calc_kernely*(z)) //user function __device__ void flux_calc_kernely( double *vol_flux_y, const double *yarea, const double *yvel0, const double *yvel1) { vol_flux_y[OPS_ACC0(0,0,0)] = 0.125 * dt * (yarea[OPS_ACC1(0,0,0)]) * ( yvel0[OPS_ACC2(0,0,0)] + yvel0[OPS_ACC2(1,0,0)] + yvel0[OPS_ACC2(0,0,1)] + yvel0[OPS_ACC2(1,0,1)] + yvel1[OPS_ACC3(0,0,0)] + yvel1[OPS_ACC3(1,0,0)] + yvel1[OPS_ACC3(0,0,1)] + yvel1[OPS_ACC3(1,0,1)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_flux_calc_kernely( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_flux_calc_kernely + idx_z * 1 * xdim0_flux_calc_kernely * ydim0_flux_calc_kernely; arg1 += idx_x * 1 + idx_y * 1 * xdim1_flux_calc_kernely + idx_z * 1 * xdim1_flux_calc_kernely * ydim1_flux_calc_kernely; arg2 += idx_x * 1 + idx_y * 1 * xdim2_flux_calc_kernely + idx_z * 1 * xdim2_flux_calc_kernely * ydim2_flux_calc_kernely; arg3 += idx_x * 1 + idx_y * 1 * xdim3_flux_calc_kernely + idx_z * 1 * xdim3_flux_calc_kernely * ydim3_flux_calc_kernely; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { flux_calc_kernely(arg0, arg1, arg2, arg3); } } // host stub function void ops_par_loop_flux_calc_kernely(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_arg args[4] = { arg0, arg1, arg2, arg3}; ops_timing_realloc(9,"flux_calc_kernely"); OPS_kernels[9].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += 
(range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_flux_calc_kernely_h || ydim0 != ydim0_flux_calc_kernely_h || xdim1 != xdim1_flux_calc_kernely_h || ydim1 != ydim1_flux_calc_kernely_h || xdim2 != xdim2_flux_calc_kernely_h || ydim2 != ydim2_flux_calc_kernely_h || xdim3 != xdim3_flux_calc_kernely_h || ydim3 != ydim3_flux_calc_kernely_h) { hipMemcpyToSymbol( xdim0_flux_calc_kernely, &xdim0, sizeof(int) ); xdim0_flux_calc_kernely_h = xdim0; hipMemcpyToSymbol( ydim0_flux_calc_kernely, &ydim0, sizeof(int) ); ydim0_flux_calc_kernely_h = ydim0; hipMemcpyToSymbol( xdim1_flux_calc_kernely, &xdim1, sizeof(int) ); xdim1_flux_calc_kernely_h = xdim1; hipMemcpyToSymbol( ydim1_flux_calc_kernely, &ydim1, sizeof(int) ); ydim1_flux_calc_kernely_h = ydim1; hipMemcpyToSymbol( xdim2_flux_calc_kernely, &xdim2, sizeof(int) ); xdim2_flux_calc_kernely_h = xdim2; hipMemcpyToSymbol( ydim2_flux_calc_kernely, &ydim2, sizeof(int) ); ydim2_flux_calc_kernely_h = ydim2; hipMemcpyToSymbol( xdim3_flux_calc_kernely, &xdim3, sizeof(int) ); xdim3_flux_calc_kernely_h = xdim3; hipMemcpyToSymbol( ydim3_flux_calc_kernely, &ydim3, sizeof(int) ); ydim3_flux_calc_kernely_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; char *p_a[4]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; 
#endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); ops_timers_core(&c1,&t1); OPS_kernels[9].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_flux_calc_kernely), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[9].time += t2-t1; ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); //Update kernel record OPS_kernels[9].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[9].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[9].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[9].transfer += ops_compute_transfer(dim, range, &arg3); }
c4af64674ddb1bf2e6a3ae3d6f52e775f0277713.cu
// // auto-generated by ops.py // __constant__ int xdim0_flux_calc_kernely; int xdim0_flux_calc_kernely_h = -1; __constant__ int ydim0_flux_calc_kernely; int ydim0_flux_calc_kernely_h = -1; __constant__ int xdim1_flux_calc_kernely; int xdim1_flux_calc_kernely_h = -1; __constant__ int ydim1_flux_calc_kernely; int ydim1_flux_calc_kernely_h = -1; __constant__ int xdim2_flux_calc_kernely; int xdim2_flux_calc_kernely_h = -1; __constant__ int ydim2_flux_calc_kernely; int ydim2_flux_calc_kernely_h = -1; __constant__ int xdim3_flux_calc_kernely; int xdim3_flux_calc_kernely_h = -1; __constant__ int ydim3_flux_calc_kernely; int ydim3_flux_calc_kernely_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_flux_calc_kernely*(y)+xdim0_flux_calc_kernely*ydim0_flux_calc_kernely*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_flux_calc_kernely*(y)+xdim1_flux_calc_kernely*ydim1_flux_calc_kernely*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_flux_calc_kernely*(y)+xdim2_flux_calc_kernely*ydim2_flux_calc_kernely*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_flux_calc_kernely*(y)+xdim3_flux_calc_kernely*ydim3_flux_calc_kernely*(z)) //user function __device__ void flux_calc_kernely( double *vol_flux_y, const double *yarea, const double *yvel0, const double *yvel1) { vol_flux_y[OPS_ACC0(0,0,0)] = 0.125 * dt * (yarea[OPS_ACC1(0,0,0)]) * ( yvel0[OPS_ACC2(0,0,0)] + yvel0[OPS_ACC2(1,0,0)] + yvel0[OPS_ACC2(0,0,1)] + yvel0[OPS_ACC2(1,0,1)] + yvel1[OPS_ACC3(0,0,0)] + yvel1[OPS_ACC3(1,0,0)] + yvel1[OPS_ACC3(0,0,1)] + yvel1[OPS_ACC3(1,0,1)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_flux_calc_kernely( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_flux_calc_kernely + idx_z * 1 * xdim0_flux_calc_kernely * ydim0_flux_calc_kernely; arg1 += idx_x * 1 + idx_y * 1 * xdim1_flux_calc_kernely + idx_z * 1 * xdim1_flux_calc_kernely * ydim1_flux_calc_kernely; arg2 += idx_x * 1 + idx_y * 1 * xdim2_flux_calc_kernely + idx_z * 1 * xdim2_flux_calc_kernely * ydim2_flux_calc_kernely; arg3 += idx_x * 1 + idx_y * 1 * xdim3_flux_calc_kernely + idx_z * 1 * xdim3_flux_calc_kernely * ydim3_flux_calc_kernely; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { flux_calc_kernely(arg0, arg1, arg2, arg3); } } // host stub function void ops_par_loop_flux_calc_kernely(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_arg args[4] = { arg0, arg1, arg2, arg3}; ops_timing_realloc(9,"flux_calc_kernely"); OPS_kernels[9].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = 
range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_flux_calc_kernely_h || ydim0 != ydim0_flux_calc_kernely_h || xdim1 != xdim1_flux_calc_kernely_h || ydim1 != ydim1_flux_calc_kernely_h || xdim2 != xdim2_flux_calc_kernely_h || ydim2 != ydim2_flux_calc_kernely_h || xdim3 != xdim3_flux_calc_kernely_h || ydim3 != ydim3_flux_calc_kernely_h) { cudaMemcpyToSymbol( xdim0_flux_calc_kernely, &xdim0, sizeof(int) ); xdim0_flux_calc_kernely_h = xdim0; cudaMemcpyToSymbol( ydim0_flux_calc_kernely, &ydim0, sizeof(int) ); ydim0_flux_calc_kernely_h = ydim0; cudaMemcpyToSymbol( xdim1_flux_calc_kernely, &xdim1, sizeof(int) ); xdim1_flux_calc_kernely_h = xdim1; cudaMemcpyToSymbol( ydim1_flux_calc_kernely, &ydim1, sizeof(int) ); ydim1_flux_calc_kernely_h = ydim1; cudaMemcpyToSymbol( xdim2_flux_calc_kernely, &xdim2, sizeof(int) ); xdim2_flux_calc_kernely_h = xdim2; cudaMemcpyToSymbol( ydim2_flux_calc_kernely, &ydim2, sizeof(int) ); ydim2_flux_calc_kernely_h = ydim2; cudaMemcpyToSymbol( xdim3_flux_calc_kernely, &xdim3, sizeof(int) ); xdim3_flux_calc_kernely_h = xdim3; cudaMemcpyToSymbol( ydim3_flux_calc_kernely, &ydim3, sizeof(int) ); ydim3_flux_calc_kernely_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; char *p_a[4]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] 
- d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); ops_timers_core(&c1,&t1); OPS_kernels[9].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_flux_calc_kernely<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[9].time += t2-t1; ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); //Update kernel record OPS_kernels[9].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[9].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[9].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[9].transfer += ops_compute_transfer(dim, range, &arg3); }
eedff0b557bebc04e602863fe2dd8ecc9ac0f140.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // REQUIRES: nvptx-registered-target // Make sure we don't allow dynamic initialization for device // variables, but accept empty constructors allowed by CUDA. // RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s #ifdef __clang__ #include "Inputs/cuda.h" #endif // Use the types we share with CodeGen tests. #include "Inputs/cuda-initializers.h" __shared__ int s_v_i = 1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ T d_t_i = {2}; __constant__ T c_t_i = {2}; __device__ ECD d_ecd_i{}; __shared__ ECD s_ecd_i{}; __constant__ ECD c_ecd_i{}; __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and 
__managed__ variables.}} __shared__ UD s_ud; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ECI d_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ECI s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not 
supported for __shared__ variables.}} __constant__ UTC c_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NETC d_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ 
variables.}} __constant__ T_B_NED c_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} // Verify that local variables may be static on device // side and that they conform to the initialization constraints. // __shared__ can't be initialized at all and others don't support dynamic initialization. __device__ void df_sema() { static __device__ int ds; static __constant__ int dc; static int v; static const int cv = 1; static const __device__ int cds = 1; static const __constant__ int cdc = 1; // __shared__ does not need to be explicitly static. __shared__ int lsi; // __constant__, __device__, and __managed__ can not be non-static local __constant__ int lci; // expected-error@-1 {{__constant__, __device__, and __managed__ are not allowed on non-static local variables}} __device__ int ldi; // expected-error@-1 {{__constant__, __device__, and __managed__ are not allowed on non-static local variables}} // Same test cases as for the globals above. 
static __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ T d_t_i = {2}; static __constant__ T c_t_i = {2}; static __device__ ECD d_ecd_i; static __shared__ ECD s_ecd_i; static __constant__ ECD c_ecd_i; static __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UD s_ud; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ECI d_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, 
__constant__, __shared__, and __managed__ variables.}} static __shared__ ECI s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UTC c_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ 
NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NETC c_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NETC d_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_B_NED c_t_b_ned; // 
expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} } __host__ __device__ void hd_sema() { static int x = 42; } inline __host__ __device__ void hd_emitted_host_only() { static int x = 42; // no error on device because this is never codegen'ed there. } void call_hd_emitted_host_only() { hd_emitted_host_only(); } // Verify that we also check field initializers in instantiated structs. struct NontrivialInitializer { __host__ __device__ NontrivialInitializer() : x(43) {} int x; }; template <typename T> __global__ void bar() { __shared__ T bad; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} } void instantiate() { hipLaunchKernelGGL(( bar<NontrivialInitializer>), dim3(1), dim3(1), 0, 0, ); // expected-note@-1 {{in instantiation of function template specialization 'bar<NontrivialInitializer>' requested here}} }
eedff0b557bebc04e602863fe2dd8ecc9ac0f140.cu
// REQUIRES: nvptx-registered-target // Make sure we don't allow dynamic initialization for device // variables, but accept empty constructors allowed by CUDA. // RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s #ifdef __clang__ #include "Inputs/cuda.h" #endif // Use the types we share with CodeGen tests. #include "Inputs/cuda-initializers.h" __shared__ int s_v_i = 1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ T d_t_i = {2}; __constant__ T c_t_i = {2}; __device__ ECD d_ecd_i{}; __shared__ ECD s_ecd_i{}; __constant__ ECD c_ecd_i{}; __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UD s_ud; // expected-error@-1 {{initialization is not 
supported for __shared__ variables.}} __constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ECI d_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ECI s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc_i(3); // expected-error@-1 
{{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NETC d_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NED c_t_b_ned; // expected-error@-1 {{dynamic initialization is not 
supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} // Verify that local variables may be static on device // side and that they conform to the initialization constraints. // __shared__ can't be initialized at all and others don't support dynamic initialization. __device__ void df_sema() { static __device__ int ds; static __constant__ int dc; static int v; static const int cv = 1; static const __device__ int cds = 1; static const __constant__ int cdc = 1; // __shared__ does not need to be explicitly static. __shared__ int lsi; // __constant__, __device__, and __managed__ can not be non-static local __constant__ int lci; // expected-error@-1 {{__constant__, __device__, and __managed__ are not allowed on non-static local variables}} __device__ int ldi; // expected-error@-1 {{__constant__, __device__, and __managed__ are not allowed on non-static local variables}} // Same test cases as for the globals above. 
static __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ T d_t_i = {2}; static __constant__ T c_t_i = {2}; static __device__ ECD d_ecd_i; static __shared__ ECD s_ecd_i; static __constant__ ECD c_ecd_i; static __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UD s_ud; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ECI d_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, 
__constant__, __shared__, and __managed__ variables.}} static __shared__ ECI s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UTC c_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ 
NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NETC c_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NETC d_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_B_NED c_t_b_ned; // 
expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} } __host__ __device__ void hd_sema() { static int x = 42; } inline __host__ __device__ void hd_emitted_host_only() { static int x = 42; // no error on device because this is never codegen'ed there. } void call_hd_emitted_host_only() { hd_emitted_host_only(); } // Verify that we also check field initializers in instantiated structs. struct NontrivialInitializer { __host__ __device__ NontrivialInitializer() : x(43) {} int x; }; template <typename T> __global__ void bar() { __shared__ T bad; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} } void instantiate() { bar<NontrivialInitializer><<<1, 1>>>(); // expected-note@-1 {{in instantiation of function template specialization 'bar<NontrivialInitializer>' requested here}} }
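For orientation, a minimal stand-alone sketch of the cases the diagnostics above leave alone (hypothetical names, not part of the test file): constant initializers on __device__ and __constant__ variables, and __shared__ declarations without any initializer.

// Assumed illustrative example only; the test file above exercises the rejected cases.
__device__ int ok_device_global = 42;         // constant initializer: accepted
__constant__ float ok_constant_global = 3.f;  // constant initializer: accepted
__global__ void ok_kernel() {
  __shared__ int tile[256];                    // no initializer: accepted
  static __device__ int ok_static_local = 7;   // static device local with constant init
  tile[threadIdx.x % 256] = ok_static_local;
}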
33e2c20c33fd1a1131266b0bd7663aa1e53d73f2.hip
// !!! This is a file automatically generated by hipify!!! /* CPU code, still some bugs. */ //#include <hip/hip_runtime.h> //#include <vector> //#include <string> //#include <set> // //#include "load_obj.h" //#include "collision.cuh" //#include "check.cuh" //#include "./common/book.h" //#include "cpu.cuh" // //#define COL_MAX_LEN 1000000 // //void printElapsedTime(hipEvent_t* start, hipEvent_t* stop, const char* opname) { // printf("\nTime of %s: ", opname); // float elapsedTime; // HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, *start, *stop)); // printf("%3.1f ms\n", elapsedTime); //} // //void makeAndPrintSet(unsigned int* data, unsigned int num, const char* title) //{ // set<unsigned int> dset; // for (int i = 0; i < num; i++) { // dset.insert(data[i]); // } // // printf("\n\n%s%u points in total:\n", title, dset.size()); // set<unsigned int>::iterator it; // for (it = dset.begin(); it != dset.end(); it++) { // printf("%u\n", *it); // } //} // //int main() //{ // hipEvent_t start, stop, m_start, m_stop; // HANDLE_ERROR(hipEventCreate(&start)); // HANDLE_ERROR(hipEventCreate(&stop)); // HANDLE_ERROR(hipEventCreate(&m_start)); // HANDLE_ERROR(hipEventCreate(&m_stop)); // // HANDLE_ERROR(hipEventRecord(m_start, 0)); // // const std::string file_path = "F:////cuda/projects/CollisionDetection/flag-2000-changed.obj"; // // std::vector<vec3f> vertexes; // std::vector<Triangle> triangles; // std::vector<unsigned long long int> mortons; // // loadObj(file_path, vertexes, triangles, mortons); // const unsigned int m_size = mortons.size(); // const unsigned int v_size = vertexes.size(); // // vec3f* v_ptr = &vertexes[0]; // new vec3f[v_size]; // Triangle* t_ptr = &triangles[0]; // new Triangle[m_size]; // unsigned long long int* m_ptr = &mortons[0]; // new unsigned long long int[m_size]; // Node* leaf_nodes = new Node[m_size]; // Node* internal_nodes = new Node[m_size-1]; // unsigned int* collision_list = new unsigned int[10000]; // unsigned int test_val; // // memset(collision_list, 0, 10000 * sizeof(unsigned int)); // // /* */ // fillLeafNodesCpu(t_ptr, m_size, leaf_nodes); // // /* BVH */ // printf("\n- before generateHierarchyParallel, wrongParentNum = %u\n", collision_list[0]); // generateHierarchyParallelCpu(m_ptr, m_size, leaf_nodes, internal_nodes, collision_list); // printf("\n- generateHierarchyParallel check result: wrongParentNum = %u, with total nodes=%u\n\n", collision_list[0], m_size-1); // // /* */ // calBoundingBoxCpu(leaf_nodes, v_ptr, m_size); // // /* */ // memset(collision_list, 0, sizeof(unsigned int) * 5); // checkInternalNodesCpu(internal_nodes, m_size - 1, &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3], &collision_list[4]); // printf("\n- Internal node check result: nullParentnum = %u, wrongBoundCount=%u, nullChildCount=%u, notInternalCount=%u, uninitBoxCount=%u, with total nodes=%u\n\n", collision_list[0], collision_list[1], collision_list[2], collision_list[3], collision_list[4], m_size-1); // // memset(collision_list, 0, sizeof(unsigned int) * 5); // checkLeafNodesCpu(leaf_nodes, m_size, &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3]); // printf("\n- Leaf node check result: nullParentnum = %u, nullTriangle=%u, notLeafCount=%u, illegalBoxCount=%u, with total nodes=%u\n\n", collision_list[0], collision_list[1], collision_list[2], collision_list[3], m_size); // // memset(collision_list, 0, sizeof(unsigned int) * 5); // checkTriangleIdxCpu(leaf_nodes, v_ptr, m_size, 632674, &collision_list[0]); // printf("\n- 
Triangle check result: illegal triangle vidx num = %u, with total triangles=%u\n\n", collision_list[0], mortons.size()); // printf("\n$ triangle num = %u, mortons num = %u, vertex num = %u\n\n", triangles.size(), mortons.size(), vertexes.size()); // // /* */ // //findCollisionsCpu(internal_nodes, leaf_nodes, v_ptr, m_size, &test_val, collision_list); // ////HANDLE_ERROR(hipMemcpy(temp_nums, test_val, sizeof(unsigned int), hipMemcpyDeviceToHost)); // ////HANDLE_ERROR(hipMemcpy(h_collision_list, collision_list, 1000 * sizeof(unsigned int), hipMemcpyDeviceToHost)); // //printf("\n\n- contact val = %u\n", test_val); // // //printf("\nCollision pair (%u triangle pairs in total):\n", test_val); // //for (int i = 0; i < test_val; i++) { // // printf("%07u - %07u\n", collision_list[2 * i], collision_list[2 * i + 1]); // //} // // //makeAndPrintSet(collision_list, 2 * test_val, "Collision Triangles:"); // // std::cout << "- Successfully Return" << std::endl; // // HANDLE_ERROR(hipEventRecord(m_stop, 0)); HANDLE_ERROR(hipEventSynchronize(m_stop)); // printElapsedTime(&m_start, &m_stop, "Total Time"); // // printf("\n test for clzll: %u\n", clzll(4567, 1)); // // //printf("\n test for __builtin_clz: %u\n", __builtin_clz(1278)); // // return 0; //}
33e2c20c33fd1a1131266b0bd7663aa1e53d73f2.cu
/* CPU code, still some bugs. */ //#include <cuda_runtime.h> //#include <vector> //#include <string> //#include <set> // //#include "load_obj.h" //#include "collision.cuh" //#include "check.cuh" //#include "./common/book.h" //#include "cpu.cuh" // //#define COL_MAX_LEN 1000000 // //void printElapsedTime(cudaEvent_t* start, cudaEvent_t* stop, const char* opname) { // printf("\nTime of %s: ", opname); // float elapsedTime; // HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, *start, *stop)); // printf("%3.1f ms\n", elapsedTime); //} // //void makeAndPrintSet(unsigned int* data, unsigned int num, const char* title) //{ // set<unsigned int> dset; // for (int i = 0; i < num; i++) { // dset.insert(data[i]); // } // // printf("\n\n%s(%u points in total):\n", title, dset.size()); // set<unsigned int>::iterator it; // for (it = dset.begin(); it != dset.end(); it++) { // printf("%u\n", *it); // } //} // //int main() //{ // cudaEvent_t start, stop, m_start, m_stop; // HANDLE_ERROR(cudaEventCreate(&start)); // HANDLE_ERROR(cudaEventCreate(&stop)); // HANDLE_ERROR(cudaEventCreate(&m_start)); // HANDLE_ERROR(cudaEventCreate(&m_stop)); // // HANDLE_ERROR(cudaEventRecord(m_start, 0)); // // const std::string file_path = "F:/坚果云文件/我的坚果云/研一上/cuda/projects/CollisionDetection/flag-2000-changed.obj"; // // std::vector<vec3f> vertexes; // std::vector<Triangle> triangles; // std::vector<unsigned long long int> mortons; // // loadObj(file_path, vertexes, triangles, mortons); // const unsigned int m_size = mortons.size(); // const unsigned int v_size = vertexes.size(); // // vec3f* v_ptr = &vertexes[0]; // new vec3f[v_size]; // Triangle* t_ptr = &triangles[0]; // new Triangle[m_size]; // unsigned long long int* m_ptr = &mortons[0]; // new unsigned long long int[m_size]; // Node* leaf_nodes = new Node[m_size]; // Node* internal_nodes = new Node[m_size-1]; // unsigned int* collision_list = new unsigned int[10000]; // unsigned int test_val; // // memset(collision_list, 0, 10000 * sizeof(unsigned int)); // // /* Fill the leaf nodes */ // fillLeafNodesCpu(t_ptr, m_size, leaf_nodes); // // /* Build the BVH */ // printf("\n- before generateHierarchyParallel, wrongParentNum = %u\n", collision_list[0]); // generateHierarchyParallelCpu(m_ptr, m_size, leaf_nodes, internal_nodes, collision_list); // printf("\n- generateHierarchyParallel check result: wrongParentNum = %u, with total nodes=%u\n\n", collision_list[0], m_size-1); // // /* Compute the bounding boxes */ // calBoundingBoxCpu(leaf_nodes, v_ptr, m_size); // // /* Self-check of internal and leaf nodes */ // memset(collision_list, 0, sizeof(unsigned int) * 5); // checkInternalNodesCpu(internal_nodes, m_size - 1, &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3], &collision_list[4]); // printf("\n- Internal node check result: nullParentnum = %u, wrongBoundCount=%u, nullChildCount=%u, notInternalCount=%u, uninitBoxCount=%u, with total nodes=%u\n\n", collision_list[0], collision_list[1], collision_list[2], collision_list[3], collision_list[4], m_size-1); // // memset(collision_list, 0, sizeof(unsigned int) * 5); // checkLeafNodesCpu(leaf_nodes, m_size, &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3]); // printf("\n- Leaf node check result: nullParentnum = %u, nullTriangle=%u, notLeafCount=%u, illegalBoxCount=%u, with total nodes=%u\n\n", collision_list[0], collision_list[1], collision_list[2], collision_list[3], m_size); // // memset(collision_list, 0, sizeof(unsigned int) * 5); // checkTriangleIdxCpu(leaf_nodes, v_ptr, m_size, 632674, &collision_list[0]); // printf("\n- Triangle check result: illegal triangle vidx num = %u, with total triangles=%u\n\n", collision_list[0], mortons.size()); // printf("\n$ triangle num = %u, mortons num = %u, vertex num = %u\n\n", triangles.size(), mortons.size(), vertexes.size()); // // /* Find collision pairs */ // //findCollisionsCpu(internal_nodes, leaf_nodes, v_ptr, m_size, &test_val, collision_list); // ////HANDLE_ERROR(cudaMemcpy(temp_nums, test_val, sizeof(unsigned int), cudaMemcpyDeviceToHost)); // ////HANDLE_ERROR(cudaMemcpy(h_collision_list, collision_list, 1000 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // //printf("\n\n- contact val = %u\n", test_val); // // //printf("\nCollision pair (%u triangle pairs in total):\n", test_val); // //for (int i = 0; i < test_val; i++) { // // printf("%07u - %07u\n", collision_list[2 * i], collision_list[2 * i + 1]); // //} // // //makeAndPrintSet(collision_list, 2 * test_val, "Collision Triangles:"); // // std::cout << "- Successfully Return" << std::endl; // // HANDLE_ERROR(cudaEventRecord(m_stop, 0)); HANDLE_ERROR(cudaEventSynchronize(m_stop)); // printElapsedTime(&m_start, &m_stop, "Total Time"); // // printf("\n test for clzll: %u\n", clzll(4567, 1)); // // //printf("\n test for __builtin_clz: %u\n", __builtin_clz(1278)); // // return 0; //}
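The commented-out driver above takes the per-triangle Morton codes straight from loadObj, so the encoding itself never appears in this pair. For reference only, and as an assumption about what such a loader typically does (the project stores 64-bit codes, and its actual implementation is not shown here), the usual 30-bit variant interleaves quantized centroid coordinates:

// Illustrative 30-bit 3D Morton encoding; hypothetical helpers, not the project's loadObj.
__host__ __device__ unsigned int expandBits(unsigned int v) {
    // Spread the lower 10 bits of v so two zero bits separate each original bit.
    v = (v * 0x00010001u) & 0xFF0000FFu;
    v = (v * 0x00000101u) & 0x0F00F00Fu;
    v = (v * 0x00000011u) & 0xC30C30C3u;
    v = (v * 0x00000005u) & 0x49249249u;
    return v;
}
__host__ __device__ unsigned int morton3D(float x, float y, float z) {
    // Coordinates are assumed normalized to [0,1] before quantization to 10 bits each.
    x = fminf(fmaxf(x * 1024.0f, 0.0f), 1023.0f);
    y = fminf(fmaxf(y * 1024.0f, 0.0f), 1023.0f);
    z = fminf(fmaxf(z * 1024.0f, 0.0f), 1023.0f);
    return (expandBits((unsigned int)x) << 2) | (expandBits((unsigned int)y) << 1) | expandBits((unsigned int)z);
}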
ceaeec994b253bd4a93769cbf36127b2bd74b57a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <opencv2/opencv.hpp> #include <vector> using namespace std; __global__ void grayscale(unsigned char *rgb, unsigned char *out, std::size_t rows, std::size_t cols){ auto idx = blockIdx.x * blockDim.x + threadIdx.x; auto idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx < rows && idy < cols){ out[idx * cols + idy] = ( 307 * rgb[ 3 * ( idx * cols + idy ) ] + 604 * rgb[ 3 * ( idx * cols + idy ) + 1 ] + 113 * rgb[ 3 * ( idx * cols + idy ) + 2 ] ) / 1024; } } int main() { cv::Mat m_in = cv::imread("4v9mo.jpg", cv::IMREAD_UNCHANGED ); auto rgb = m_in.data; auto rows = m_in.rows; auto cols = m_in.cols; std::vector< unsigned char > g( rows * cols ); cv::Mat m_out(rows,cols, CV_8UC1, g.data()); unsigned char *rgb_d = nullptr; unsigned char *m_d = nullptr; hipMalloc(&m_d, rows * cols); hipMalloc(&rgb_d, 3 * rows * cols ); hipMemcpy(rgb_d,rgb,rows * cols * 3,hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start ); dim3 block(32,32); dim3 grid( ( rows - 1) / block.x + 1 , ( cols - 1 ) /block.y + 1 ); hipLaunchKernelGGL(( grayscale), dim3(grid),dim3(block), 0, 0, rgb_d,m_d,rows,cols); //gestion des erreurs. /*hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if(err != hipSuccess){ cerr << hipGetErrorString(err) << endl; exit(EXIT_FAILURE); }*/ hipMemcpy(g.data(),m_d,rows * cols,hipMemcpyDeviceToHost); hipEventRecord( stop ); hipEventSynchronize( stop ); float duration = 0.0f; hipEventElapsedTime( &duration, start, stop ); cout << "time: " << duration << "ms\n"; //hipMemcpy(g.data(),m_d,rows * cols,hipMemcpyDeviceToHost); cv::imwrite( "out.jpg", m_out ); hipFree(rgb_d); hipFree(m_d); return 0; }
ceaeec994b253bd4a93769cbf36127b2bd74b57a.cu
#include <iostream> #include <opencv2/opencv.hpp> #include <vector> using namespace std; __global__ void grayscale(unsigned char *rgb, unsigned char *out, std::size_t rows, std::size_t cols){ auto idx = blockIdx.x * blockDim.x + threadIdx.x; auto idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx < rows && idy < cols){ out[idx * cols + idy] = ( 307 * rgb[ 3 * ( idx * cols + idy ) ] + 604 * rgb[ 3 * ( idx * cols + idy ) + 1 ] + 113 * rgb[ 3 * ( idx * cols + idy ) + 2 ] ) / 1024; } } int main() { cv::Mat m_in = cv::imread("4v9mo.jpg", cv::IMREAD_UNCHANGED ); auto rgb = m_in.data; auto rows = m_in.rows; auto cols = m_in.cols; std::vector< unsigned char > g( rows * cols ); cv::Mat m_out(rows,cols, CV_8UC1, g.data()); unsigned char *rgb_d = nullptr; unsigned char *m_d = nullptr; cudaMalloc(&m_d, rows * cols); cudaMalloc(&rgb_d, 3 * rows * cols ); cudaMemcpy(rgb_d,rgb,rows * cols * 3,cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start ); dim3 block(32,32); dim3 grid( ( rows - 1) / block.x + 1 , ( cols - 1 ) /block.y + 1 ); grayscale<<<grid,block>>>(rgb_d,m_d,rows,cols); //gestion des erreurs. /*cudaDeviceSynchronize(); cudaError err = cudaGetLastError(); if(err != cudaSuccess){ cerr << cudaGetErrorString(err) << endl; exit(EXIT_FAILURE); }*/ cudaMemcpy(g.data(),m_d,rows * cols,cudaMemcpyDeviceToHost); cudaEventRecord( stop ); cudaEventSynchronize( stop ); float duration = 0.0f; cudaEventElapsedTime( &duration, start, stop ); cout << "time: " << duration << "ms\n"; //cudaMemcpy(g.data(),m_d,rows * cols,cudaMemcpyDeviceToHost); cv::imwrite( "out.jpg", m_out ); cudaFree(rgb_d); cudaFree(m_d); return 0; }
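The grayscale kernel in this pair folds the usual 0.30/0.59/0.11 luma weights into fixed-point arithmetic: 307 + 604 + 113 = 1024, so one divide by 1024 normalizes the weighted sum. A small host-side mirror of that expression (a hypothetical helper, not in the original program) is handy for spot-checking a few output pixels; note that cv::imread returns BGR-ordered channels, so which coefficient lands on which channel depends on the ordering one assumes.

// Reference fixed-point grayscale for one interleaved 3-channel pixel,
// mirroring the device expression (307*c0 + 604*c1 + 113*c2) / 1024.
inline unsigned char gray_reference(const unsigned char* px) {
    unsigned int v = 307u * px[0] + 604u * px[1] + 113u * px[2];  // at most 255 * 1024
    return static_cast<unsigned char>(v / 1024u);
}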
2efab13f9f29ed2fc57ee8f2b4884a645c5d872d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <sys/time.h> using namespace std; /* example for atomic function usage */ __global__ void atomic(int n, float *a) { //a[0] += 1.0f; // gives wrong result // instead use atomic function atomicAdd(&a[0], 1.0f); } int main() { int n = 1024; float *data = (float*) malloc(n * sizeof(float)); for (int i=0; i<n; i++) { data[i] = (float)i; } float *data_dev; hipMalloc((void**) &data_dev, n * sizeof(float)); hipMemcpy(data_dev, data, n * sizeof(float) , hipMemcpyHostToDevice); hipError_t error = hipGetLastError(); cout << "copy to device = " << error << " : " << hipGetErrorString(error) << endl; int nBlocks = 1; int nThreads = 1024; hipLaunchKernelGGL(( atomic) , dim3(nBlocks), dim3(nThreads) , 0, 0, n, data_dev); error = hipGetLastError(); cout << "run kernel = " << error << " : " << hipGetErrorString(error) << endl; hipMemcpy(data, data_dev, n * sizeof(float) , hipMemcpyDeviceToHost); error = hipGetLastError(); cout << "copy from device = " << error << " : " << hipGetErrorString(error) << endl; hipFree(data_dev); cout << "data[0] = " << data[0] << endl; free(data); }
2efab13f9f29ed2fc57ee8f2b4884a645c5d872d.cu
#include <cuda.h> #include <iostream> #include <sys/time.h> using namespace std; /* example for atomic function usage */ __global__ void atomic(int n, float *a) { //a[0] += 1.0f; // gives wrong result // instead use atomic function atomicAdd(&a[0], 1.0f); } int main() { int n = 1024; float *data = (float*) malloc(n * sizeof(float)); for (int i=0; i<n; i++) { data[i] = (float)i; } float *data_dev; cudaMalloc((void**) &data_dev, n * sizeof(float)); cudaMemcpy(data_dev, data, n * sizeof(float) , cudaMemcpyHostToDevice); cudaError_t error = cudaGetLastError(); cout << "copy to device = " << error << " : " << cudaGetErrorString(error) << endl; int nBlocks = 1; int nThreads = 1024; atomic <<< nBlocks, nThreads >>>(n, data_dev); error = cudaGetLastError(); cout << "run kernel = " << error << " : " << cudaGetErrorString(error) << endl; cudaMemcpy(data, data_dev, n * sizeof(float) , cudaMemcpyDeviceToHost); error = cudaGetLastError(); cout << "copy from device = " << error << " : " << cudaGetErrorString(error) << endl; cudaFree(data_dev); cout << "data[0] = " << data[0] << endl; free(data); }
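The atomic example in this pair deliberately funnels every one of the 1024 increments through a single global atomicAdd. If throughput mattered more than illustration, a common refinement (sketched here under that assumption, not taken from the file) accumulates per block in shared memory and issues one global atomic per block:

// Illustrative variant: one global atomicAdd per block instead of one per thread.
__global__ void atomic_block_reduced(int n, float* a) {
    __shared__ float block_sum;
    if (threadIdx.x == 0) block_sum = 0.0f;
    __syncthreads();
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicAdd(&block_sum, 1.0f);             // block-local atomic in shared memory
    __syncthreads();
    if (threadIdx.x == 0) atomicAdd(&a[0], block_sum);  // one global atomic per block
}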
467bda461b633daf7f5d38b2c5641c09f3d050a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/concat_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Concat(const int nthreads, const Dtype *in_data, const bool forward, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, Dtype *out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { out_data[index] = in_data[top_index]; } } } template <typename Dtype> void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { if (bottom.size() == 1) { return; } Dtype *top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype *bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; // NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( Concat<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); offset_concat_axis += bottom_concat_axis; } } template <typename Dtype> void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { if (bottom.size() == 1) { return; } const Dtype *top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { const int bottom_concat_axis = bottom[i]->shape(concat_axis_); if (propagate_down[i]) { Dtype *bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; // NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( Concat<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_diff, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); } offset_concat_axis += bottom_concat_axis; } } INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); } // namespace caffe
467bda461b633daf7f5d38b2c5641c09f3d050a5.cu
#include <vector> #include "caffe/layers/concat_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Concat(const int nthreads, const Dtype *in_data, const bool forward, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, Dtype *out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { out_data[index] = in_data[top_index]; } } } template <typename Dtype> void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { if (bottom.size() == 1) { return; } Dtype *top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype *bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, bottom_data, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); offset_concat_axis += bottom_concat_axis; } } template <typename Dtype> void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { if (bottom.size() == 1) { return; } const Dtype *top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { const int bottom_concat_axis = bottom[i]->shape(concat_axis_); if (propagate_down[i]) { Dtype *bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, top_diff, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); } offset_concat_axis += bottom_concat_axis; } } INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); } // namespace caffe
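The Concat kernel's only real work is remapping a linear index in one bottom blob to its slot in the top blob. A plain host restatement of that arithmetic (hypothetical helper name, added for illustration; in Caffe, concat_size is the product of the dimensions after the concat axis) can make the roles of the two axis extents and the running offset easier to follow:

// Host mirror of the kernel's index mapping for one bottom blob.
int top_index_of(int bottom_index, int concat_size, int top_concat_axis,
                 int bottom_concat_axis, int offset_concat_axis) {
    const int total_concat_size = concat_size * bottom_concat_axis;
    const int concat_num   = bottom_index / total_concat_size;  // position over the leading dims
    const int concat_index = bottom_index % total_concat_size;  // position inside one slice
    return concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size;
}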
b89d0f5153f46e0de52fedf988fbd93ad1e48d67.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 2 #define ITERATIONS 500000000 #include "../include/ContAcq-IntClk.h" // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=1; float Value2=A[i]; float Value3=B[i]; float Value; float I1=A[i]; float I2=B[i]; // Excessive Addition access // if(i%32==0){ for(unsigned k=0; k<ITERATIONS;k++) { Value1=I1*A[i]; Value3=I2*B[i]; Value1*=Value2; Value1*=Value2; Value2=Value3*Value1; Value1=Value2*Value3; } // } __syncthreads(); Value=Value1; C[i]=Value*Value2; } int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after\n"); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); 
printf("execution time = %f\n", cutGetTimerValue(my_timer)); getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
b89d0f5153f46e0de52fedf988fbd93ad1e48d67.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 2 #define ITERATIONS 500000000 #include "../include/ContAcq-IntClk.h" // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=1; float Value2=A[i]; float Value3=B[i]; float Value; float I1=A[i]; float I2=B[i]; // Excessive Addition access // if(i%32==0){ for(unsigned k=0; k<ITERATIONS;k++) { Value1=I1*A[i]; Value3=I2*B[i]; Value1*=Value2; Value1*=Value2; Value2=Value3*Value1; Value1=Value2*Value3; } // } __syncthreads(); Value=Value1; C[i]=Value*Value2; } int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after\n"); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); getLastCudaError("kernel launch 
failure"); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
PointwiseOpsKernel.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/PointwiseOps.h> #include <THH/THHNumerics.cuh> namespace at { namespace native { void addcmul_cuda_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "addcmul_cuda", [&]() { auto alpha = value.to<scalar_t>(); gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t { return a + alpha * b * c; }); }); } void addcdiv_cuda_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "addcdiv_cuda", [&]() { auto alpha = value.to<scalar_t>(); gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t { return a + alpha * (b / c); }); }); } void smooth_l1_backward_cuda_kernel(TensorIterator& iter, Scalar norm) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_backward_cuda", [&]() { auto norm_val = norm.to<scalar_t>(); gpu_kernel(iter, [norm_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t { const auto x = input - target; if (x < scalar_t(-1)) return -norm_val * grad_output; else if (x > scalar_t(1)) return norm_val * grad_output; else return norm_val * x * grad_output; }); }); } void mse_backward_cuda_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_backward_cuda", [&]() { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "mse_backward_cuda", [&] { auto alpha = value.to<scalar_t>(); gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t { return alpha * (a - b) * c; }); }); }); } REGISTER_DISPATCH(addcdiv_stub, &addcdiv_cuda_kernel); REGISTER_DISPATCH(addcmul_stub, &addcmul_cuda_kernel); REGISTER_DISPATCH(smooth_l1_backward_stub, &smooth_l1_backward_cuda_kernel); REGISTER_DISPATCH(mse_backward_stub, &mse_backward_cuda_kernel); }} // namespace at::native
PointwiseOpsKernel.cu
#include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/PointwiseOps.h> #include <THC/THCNumerics.cuh> namespace at { namespace native { void addcmul_cuda_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "addcmul_cuda", [&]() { auto alpha = value.to<scalar_t>(); gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t { return a + alpha * b * c; }); }); } void addcdiv_cuda_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "addcdiv_cuda", [&]() { auto alpha = value.to<scalar_t>(); gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t { return a + alpha * (b / c); }); }); } void smooth_l1_backward_cuda_kernel(TensorIterator& iter, Scalar norm) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_backward_cuda", [&]() { auto norm_val = norm.to<scalar_t>(); gpu_kernel(iter, [norm_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t { const auto x = input - target; if (x < scalar_t(-1)) return -norm_val * grad_output; else if (x > scalar_t(1)) return norm_val * grad_output; else return norm_val * x * grad_output; }); }); } void mse_backward_cuda_kernel(TensorIterator& iter, Scalar value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_backward_cuda", [&]() { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "mse_backward_cuda", [&] { auto alpha = value.to<scalar_t>(); gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t { return alpha * (a - b) * c; }); }); }); } REGISTER_DISPATCH(addcdiv_stub, &addcdiv_cuda_kernel); REGISTER_DISPATCH(addcmul_stub, &addcmul_cuda_kernel); REGISTER_DISPATCH(smooth_l1_backward_stub, &smooth_l1_backward_cuda_kernel); REGISTER_DISPATCH(mse_backward_stub, &mse_backward_cuda_kernel); }} // namespace at::native
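The smooth_l1_backward kernel in this pair writes the derivative of the smooth L1 (Huber-style) loss as three branches; the same result can be read as norm * clamp(input - target, -1, 1) * grad_output. A host-side restatement with a hypothetical name, for clarity only:

// Clamped restatement of the gradient computed by smooth_l1_backward_cuda_kernel.
template <typename T>
T smooth_l1_grad_reference(T input, T target, T grad_output, T norm) {
    T x = input - target;
    if (x < T(-1)) x = T(-1);
    if (x > T(1))  x = T(1);
    return norm * x * grad_output;
}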
187d96d2ab77e0ab03f11cf0cb646e92e34f2ea8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // Implementation of a level's step in scan. Slide 43 // Find the location of the previous sum and add it to its new position. __global__ void kernUpsweep(int numObjects, int* odata, int level) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= numObjects) { return; } // Every add in this operation is shifted right based on the depth of the tree. int sumIndexShift = powf(2, level); // Find which element the current thread is supposed to access. int currentIndex = (index + 1) * (2 * sumIndexShift) - 1; // Add the element which is shifted to the left. // This is equivalent to finding the left child of the parent node. // The current element is the right child; add to update the array. odata[currentIndex] += odata[currentIndex - sumIndexShift]; } // Make this an exclusive prefix scan by making the first element 0. // Push all other elements back. Slide 46 __global__ void kernDownsweep(int numObjects, int* odata, int level) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= numObjects) { return; } // Every add in this operation is shifted left based on the depth of the tree. int sumIndexShift = powf(2, level); // Find which element the current thread is supposed to access. int currentIndex = (index + 1) * (2 * sumIndexShift) - 1; // Perform the actual addition. int temp = odata[currentIndex - sumIndexShift]; odata[currentIndex - sumIndexShift] = odata[currentIndex]; odata[currentIndex] += temp; } // This kernel is used to zero the n-1th element, the root, for downsweeping. __global__ void kernZeroRoot(int n, int *idata) { idata[n] = 0; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scanNoTimer(int n, int *odata, const int *idata) { /* Since the work-efficient scan operates on a binary tree structure, it works best with arrays with power-of-two length. Make sure your implementation works on non-power-of-two sized arrays (see ilog2ceil). This requires extra memory: your intermediate array sizes will need to be rounded to the next power of two. */ int nextPowerOfTwo = 1; while (nextPowerOfTwo < n) { nextPowerOfTwo *= 2; } // "Create two device arrays. Swap them at each iteration: // read from A and write to B, read from B and write to A." 
int* dev_A; hipMalloc((void**)&dev_A, nextPowerOfTwo * sizeof(int)); // Zero out this chunk of memory since it's not getting filled by idata hipMemset(dev_A, 0, nextPowerOfTwo * sizeof(int)); // How to copy data to the GPU hipMemcpy(dev_A, idata, sizeof(int) * n, hipMemcpyHostToDevice); // Build the summed values before downsweeping int depthMax = ilog2ceil(nextPowerOfTwo); for (int depth = 0; depth < depthMax; ++depth) { dim3 fullBlocksPerGrid(((nextPowerOfTwo / pow(2, depth + 1)) + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernUpsweep), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, nextPowerOfTwo, dev_A, depth); } // "Set root to zero" dim3 fullBlocksPerGrid((nextPowerOfTwo + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernZeroRoot), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, nextPowerOfTwo - 1, dev_A); // "Traverse back down tree using partial sums to build the scan" for (int depth = (depthMax - 1); depth > -1; --depth) { dim3 fullBlocksPerGrid(((nextPowerOfTwo / pow(2, depth + 1)) + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernDownsweep), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, nextPowerOfTwo, dev_A, depth); } // Get the return value off of the device and free memory. hipMemcpy(odata, dev_A, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(dev_A); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { /* Since the work-efficient scan operates on a binary tree structure, it works best with arrays with power-of-two length. Make sure your implementation works on non-power-of-two sized arrays (see ilog2ceil). This requires extra memory: your intermediate array sizes will need to be rounded to the next power of two. */ int nextPowerOfTwo = 1; while (nextPowerOfTwo < n) { nextPowerOfTwo *= 2; } // Do not time memory allocation // "Create two device arrays. Swap them at each iteration: // read from A and write to B, read from B and write to A." int* dev_A; hipMalloc((void**)&dev_A, nextPowerOfTwo * sizeof(int)); // Zero out this chunk of memory since it's not getting filled by idata hipMemset(dev_A, 0, nextPowerOfTwo * sizeof(int)); // How to copy data to the GPU hipMemcpy(dev_A, idata, sizeof(int) * n, hipMemcpyHostToDevice); // Time everything else timer().startGpuTimer(); // Build the summed values before downsweeping int depthMax = ilog2ceil(nextPowerOfTwo); for (int depth = 0; depth < depthMax; ++depth) { dim3 fullBlocksPerGrid(((nextPowerOfTwo / pow(2, depth + 1)) + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernUpsweep), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, nextPowerOfTwo, dev_A, depth); checkCUDAErrorFn("upsweep failed"); } // "Set root to zero" dim3 fullBlocksPerGrid((nextPowerOfTwo + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernZeroRoot), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, nextPowerOfTwo - 1, dev_A); checkCUDAErrorFn("zeroRoot failed"); // "Traverse back down tree using partial sums to build the scan" for (int depth = (depthMax - 1); depth > -1; --depth) { dim3 fullBlocksPerGrid(((nextPowerOfTwo / pow(2, depth + 1)) + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernDownsweep), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, nextPowerOfTwo, dev_A, depth); checkCUDAErrorFn("downsweep failed"); } timer().endGpuTimer(); // Get the return value off of the device and free memory. 
hipMemcpy(odata, dev_A, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(dev_A); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { // Do not time memory allocation int* dev_odata; hipMalloc((void**)&dev_odata, n * sizeof(int)); int* dev_idata; hipMalloc((void**)&dev_idata, n * sizeof(int)); hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice); int* dev_bools; hipMalloc((void**)&dev_bools, n * sizeof(int)); int* dev_indices; hipMalloc((void**)&dev_indices, n * sizeof(int)); // Time everything else timer().startGpuTimer(); // Step 1: Compute temporary array. dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); hipLaunchKernelGGL(( Common::kernMapToBoolean), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_bools, dev_idata); // Start counting the number of elements--check this last index in dev_bools to see if // it contains the element which might get shifted out after scanning. int elementsRemaining; hipMemcpy(&elementsRemaining, dev_bools + n - 1, sizeof(int), hipMemcpyDeviceToHost); // Step 2: Run exclusive scan on temporary array hipMemcpy(dev_indices, dev_bools, n * sizeof(int), hipMemcpyDeviceToDevice); scanNoTimer(n, dev_indices, dev_indices); // Get this final sum from scanning and add it to the variable already accounting for the // potentially-shifted element. int elementsRemainingAdd; hipMemcpy(&elementsRemainingAdd, dev_indices + n - 1, sizeof(int), hipMemcpyDeviceToHost); elementsRemaining += elementsRemainingAdd; // Step 3: Scatter hipLaunchKernelGGL(( Common::kernScatter), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_odata, dev_idata, dev_bools, dev_indices); timer().endGpuTimer(); // Get the return value off of the device and free memory. hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_odata); hipFree(dev_idata); hipFree(dev_bools); hipFree(dev_indices); return elementsRemaining; } } }
187d96d2ab77e0ab03f11cf0cb646e92e34f2ea8.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"

namespace StreamCompaction {
    namespace Efficient {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        /*****************
         * Configuration *
         *****************/

        /*! Block size used for CUDA kernel launch. */
        #define blockSize 128

        // Implementation of a level's step in scan. Slide 43
        // Find the location of the previous sum and add it to its new position.
        __global__ void kernUpsweep(int numObjects, int* odata, int level) {
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            if (index >= numObjects) {
                return;
            }

            // Every add in this operation is shifted right based on the depth of the tree.
            int sumIndexShift = powf(2, level);

            // Find which element the current thread is supposed to access.
            int currentIndex = (index + 1) * (2 * sumIndexShift) - 1;

            // Add the element which is shifted to the left.
            // This is equivalent to finding the left child of the parent node.
            // The current element is the right child; add to update the array.
            odata[currentIndex] += odata[currentIndex - sumIndexShift];
        }

        // Make this an exclusive prefix scan by making the first element 0.
        // Push all other elements back. Slide 46
        __global__ void kernDownsweep(int numObjects, int* odata, int level) {
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            if (index >= numObjects) {
                return;
            }

            // Every add in this operation is shifted left based on the depth of the tree.
            int sumIndexShift = powf(2, level);

            // Find which element the current thread is supposed to access.
            int currentIndex = (index + 1) * (2 * sumIndexShift) - 1;

            // Perform the actual addition.
            int temp = odata[currentIndex - sumIndexShift];
            odata[currentIndex - sumIndexShift] = odata[currentIndex];
            odata[currentIndex] += temp;
        }

        // This kernel is used to zero the n-1th element, the root, for downsweeping.
        __global__ void kernZeroRoot(int n, int *idata) {
            idata[n] = 0;
        }

        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scanNoTimer(int n, int *odata, const int *idata) {
            /* Since the work-efficient scan operates on a binary tree structure, it works best
               with arrays with power-of-two length. Make sure your implementation works on
               non-power-of-two sized arrays (see ilog2ceil). This requires extra memory: your
               intermediate array sizes will need to be rounded to the next power of two. */
            int nextPowerOfTwo = 1;
            while (nextPowerOfTwo < n) {
                nextPowerOfTwo *= 2;
            }

            // "Create two device arrays. Swap them at each iteration:
            // read from A and write to B, read from B and write to A."
            int* dev_A;
            cudaMalloc((void**)&dev_A, nextPowerOfTwo * sizeof(int));
            // Zero out this chunk of memory since it's not getting filled by idata
            cudaMemset(dev_A, 0, nextPowerOfTwo * sizeof(int));
            // How to copy data to the GPU
            cudaMemcpy(dev_A, idata, sizeof(int) * n, cudaMemcpyHostToDevice);

            // Build the summed values before downsweeping
            int depthMax = ilog2ceil(nextPowerOfTwo);
            for (int depth = 0; depth < depthMax; ++depth) {
                dim3 fullBlocksPerGrid(((nextPowerOfTwo / pow(2, depth + 1)) + blockSize - 1) / blockSize);
                kernUpsweep<<<fullBlocksPerGrid, blockSize>>>(nextPowerOfTwo, dev_A, depth);
            }

            // "Set root to zero"
            dim3 fullBlocksPerGrid((nextPowerOfTwo + blockSize - 1) / blockSize);
            kernZeroRoot<<<fullBlocksPerGrid, blockSize>>>(nextPowerOfTwo - 1, dev_A);

            // "Traverse back down tree using partial sums to build the scan"
            for (int depth = (depthMax - 1); depth > -1; --depth) {
                dim3 fullBlocksPerGrid(((nextPowerOfTwo / pow(2, depth + 1)) + blockSize - 1) / blockSize);
                kernDownsweep<<<fullBlocksPerGrid, blockSize>>>(nextPowerOfTwo, dev_A, depth);
            }

            // Get the return value off of the device and free memory.
            cudaMemcpy(odata, dev_A, sizeof(int) * n, cudaMemcpyDeviceToHost);
            cudaFree(dev_A);
        }

        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
            /* Since the work-efficient scan operates on a binary tree structure, it works best
               with arrays with power-of-two length. Make sure your implementation works on
               non-power-of-two sized arrays (see ilog2ceil). This requires extra memory: your
               intermediate array sizes will need to be rounded to the next power of two. */
            int nextPowerOfTwo = 1;
            while (nextPowerOfTwo < n) {
                nextPowerOfTwo *= 2;
            }

            // Do not time memory allocation
            // "Create two device arrays. Swap them at each iteration:
            // read from A and write to B, read from B and write to A."
            int* dev_A;
            cudaMalloc((void**)&dev_A, nextPowerOfTwo * sizeof(int));
            // Zero out this chunk of memory since it's not getting filled by idata
            cudaMemset(dev_A, 0, nextPowerOfTwo * sizeof(int));
            // How to copy data to the GPU
            cudaMemcpy(dev_A, idata, sizeof(int) * n, cudaMemcpyHostToDevice);

            // Time everything else
            timer().startGpuTimer();

            // Build the summed values before downsweeping
            int depthMax = ilog2ceil(nextPowerOfTwo);
            for (int depth = 0; depth < depthMax; ++depth) {
                dim3 fullBlocksPerGrid(((nextPowerOfTwo / pow(2, depth + 1)) + blockSize - 1) / blockSize);
                kernUpsweep<<<fullBlocksPerGrid, blockSize>>>(nextPowerOfTwo, dev_A, depth);
                checkCUDAErrorFn("upsweep failed");
            }

            // "Set root to zero"
            dim3 fullBlocksPerGrid((nextPowerOfTwo + blockSize - 1) / blockSize);
            kernZeroRoot<<<fullBlocksPerGrid, blockSize>>>(nextPowerOfTwo - 1, dev_A);
            checkCUDAErrorFn("zeroRoot failed");

            // "Traverse back down tree using partial sums to build the scan"
            for (int depth = (depthMax - 1); depth > -1; --depth) {
                dim3 fullBlocksPerGrid(((nextPowerOfTwo / pow(2, depth + 1)) + blockSize - 1) / blockSize);
                kernDownsweep<<<fullBlocksPerGrid, blockSize>>>(nextPowerOfTwo, dev_A, depth);
                checkCUDAErrorFn("downsweep failed");
            }

            timer().endGpuTimer();

            // Get the return value off of the device and free memory.
            cudaMemcpy(odata, dev_A, sizeof(int) * n, cudaMemcpyDeviceToHost);
            cudaFree(dev_A);
        }

        /**
         * Performs stream compaction on idata, storing the result into odata.
         * All zeroes are discarded.
         *
         * @param n       The number of elements in idata.
         * @param odata   The array into which to store elements.
         * @param idata   The array of elements to compact.
         * @returns       The number of elements remaining after compaction.
         */
        int compact(int n, int *odata, const int *idata) {
            // Do not time memory allocation
            int* dev_odata;
            cudaMalloc((void**)&dev_odata, n * sizeof(int));
            int* dev_idata;
            cudaMalloc((void**)&dev_idata, n * sizeof(int));
            cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
            int* dev_bools;
            cudaMalloc((void**)&dev_bools, n * sizeof(int));
            int* dev_indices;
            cudaMalloc((void**)&dev_indices, n * sizeof(int));

            // Time everything else
            timer().startGpuTimer();

            // Step 1: Compute temporary array.
            dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
            Common::kernMapToBoolean<<<fullBlocksPerGrid, blockSize>>>(n, dev_bools, dev_idata);

            // Start counting the number of elements--check this last index in dev_bools to see if
            // it contains the element which might get shifted out after scanning.
            int elementsRemaining;
            cudaMemcpy(&elementsRemaining, dev_bools + n - 1, sizeof(int), cudaMemcpyDeviceToHost);

            // Step 2: Run exclusive scan on temporary array
            cudaMemcpy(dev_indices, dev_bools, n * sizeof(int), cudaMemcpyDeviceToDevice);
            scanNoTimer(n, dev_indices, dev_indices);

            // Get this final sum from scanning and add it to the variable already accounting for the
            // potentially-shifted element.
            int elementsRemainingAdd;
            cudaMemcpy(&elementsRemainingAdd, dev_indices + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
            elementsRemaining += elementsRemainingAdd;

            // Step 3: Scatter
            Common::kernScatter<<<fullBlocksPerGrid, blockSize>>>(n, dev_odata, dev_idata, dev_bools, dev_indices);

            timer().endGpuTimer();

            // Get the return value off of the device and free memory.
            cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
            cudaFree(dev_odata);
            cudaFree(dev_idata);
            cudaFree(dev_bools);
            cudaFree(dev_idata);
            return elementsRemaining;
        }
    }
}
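// ---------------------------------------------------------------------------
// Editor's note: the short harness below is an illustrative sketch and is not
// part of the original file above. It assumes the declarations of
// StreamCompaction::Efficient::scan and ::compact from "efficient.h" are
// visible, and simply checks the GPU scan against a plain CPU exclusive
// prefix sum; the input values and sizes are arbitrary examples.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>
#include "efficient.h"

// CPU reference: exclusive prefix sum, odata[i] = sum of idata[0..i-1].
static void cpuExclusiveScan(int n, int *odata, const int *idata) {
    int running = 0;
    for (int i = 0; i < n; ++i) {
        odata[i] = running;
        running += idata[i];
    }
}

int main() {
    const int n = 1000;                      // deliberately non-power-of-two
    std::vector<int> idata(n), gpu(n), cpu(n);
    for (int i = 0; i < n; ++i) {
        idata[i] = i % 4;                    // small values, every fourth one zero
    }

    StreamCompaction::Efficient::scan(n, gpu.data(), idata.data());
    cpuExclusiveScan(n, cpu.data(), idata.data());

    bool ok = true;
    for (int i = 0; i < n; ++i) {
        ok = ok && (gpu[i] == cpu[i]);
    }
    std::printf("scan %s\n", ok ? "matches CPU reference" : "MISMATCH");

    // compact() drops zeroes, so for the i % 4 input above roughly three
    // quarters of the elements should remain.
    std::vector<int> compacted(n);
    int remaining = StreamCompaction::Efficient::compact(n, compacted.data(), idata.data());
    std::printf("compact kept %d of %d elements\n", remaining, n);
    return 0;
}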
a9e9e645f5c1fd765a63e673223d8ec54df26259.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "network_updater_cuda.h" #include "neural_network_cuda_exception.h" #include "layer_testing_schema_factory.h" #include "cuda_linear_buffer_device.h" #include "cuda_linear_buffer_host.h" #include "util_cuda.h" #include "cuda_event.h" #include "layer_updater_schema_factory.h" #include "supervised_data_reader_async_helper.h" #include "error_function_updater_cuda_factory.h" #include "../nn_types.h" #include <hip/hip_runtime.h> #include <boost/format.hpp> #include <stack> #include "../debug_util.h" #include <boost/filesystem.hpp> namespace nnforge { namespace cuda { __global__ void convert_compacted_to_raw_upd_kernel( const uchar4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { uchar4 inp = input[elem_id]; float4 val; val.x = inp.x * (1.0F / 255.0F); val.y = inp.y * (1.0F / 255.0F); val.z = inp.z * (1.0F / 255.0F); val.w = inp.w * (1.0F / 255.0F); output[elem_id] = val; } } __global__ void dropout_kernel( float * __restrict neurons, const float * __restrict random_buf, float dropout_rate, int offset, unsigned int mask, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { unsigned int random_elem_id = (elem_id + offset) & mask; if (random_buf[random_elem_id] < dropout_rate) neurons[elem_id] = 0.0F; } } __global__ void apply_gradient_kernel( float * __restrict data, float * __restrict gradient, const float * __restrict learning_rate, float normalizer, float weight_decay, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float current_weight = data[elem_id]; float lr = learning_rate[elem_id]; float gr = gradient[elem_id]; float new_weight = current_weight + lr * (gr * normalizer - current_weight * weight_decay); data[elem_id] = new_weight; gradient[elem_id] = 0.0F; } } __global__ void apply_gradient_with_momentum_kernel( float * __restrict data, float * __restrict gradient, float * __restrict previous_upd, const float * __restrict learning_rate, float normalizer, float weight_decay, float momentum, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float current_weight = data[elem_id]; float lr = learning_rate[elem_id]; float gr = gradient[elem_id]; float prev_upd = previous_upd[elem_id]; float upd = prev_upd * momentum + lr * (gr * normalizer - current_weight * weight_decay); float new_weight = current_weight + upd; data[elem_id] = new_weight; gradient[elem_id] = 0.0F; previous_upd[elem_id] = upd; } } unsigned int network_updater_cuda::max_entry_count_in_single_batch = 1024; network_updater_cuda::network_updater_cuda( network_schema_smart_ptr schema, const_error_function_smart_ptr ef, const std::map<unsigned int, 
float>& layer_to_dropout_rate_map, cuda_running_configuration_const_smart_ptr cuda_config) : network_updater(schema, ef, layer_to_dropout_rate_map) , cuda_config(cuda_config) { const const_layer_list& layer_list = *schema; testing_layer_count = 0; start_layer_nonempty_weights_iterator = layer_list.begin(); for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it) { start_layer_nonempty_weights_iterator = it; if (!(*it)->is_empty_data()) break; testing_layer_count++; } ef_updater = single_error_function_updater_cuda_factory::get_const_instance().get_error_function_updater_cuda(ef->get_uuid()); error_function_fused_with_activation = (ef_updater->get_fusable_activation_uuid() == layer_list.back()->get_uuid()); for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it) testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config)); for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it) { if ((it != layer_list.end() - 1) || (!error_function_fused_with_activation)) updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config)); } setup_network_cuda(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it) testing_schema_data.push_back((*it)->get_schema_buffers()); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it) updater_schema_data.push_back((*it)->get_schema_buffers()); } network_updater_cuda::~network_updater_cuda() { } void network_updater_cuda::setup_network_cuda() { command_stream = cuda_stream_smart_ptr(new cuda_stream()); data_stream = cuda_stream_smart_ptr(new cuda_stream()); } testing_result_smart_ptr network_updater_cuda::actual_update( supervised_data_reader& reader, const layer_data_list& learning_rate, network_data_smart_ptr data, unsigned int batch_size, float weight_decay, float momentum) { testing_result_smart_ptr res(new testing_result(ef)); reader.reset(); layer_configuration_specific input_configuration = reader.get_input_configuration(); layer_configuration_specific output_configuration = reader.get_output_configuration(); unsigned int input_neuron_count = input_configuration.get_neuron_count(); unsigned int output_neuron_count = output_configuration.get_neuron_count(); unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map(); unsigned int output_neuron_count_per_feature_map = output_configuration.get_neuron_count_per_feature_map(); neuron_data_type::input_type type_code = reader.get_input_type(); size_t input_neuron_elem_size = reader.get_input_neuron_elem_size(); if (error_function_fused_with_activation && (output_neuron_count_per_feature_map != 1)) throw neural_network_exception("Error function is fused with activation but output_neuron_count_per_feature_map is not equal 1: not implemented"); unsigned int updater_max_count = ::max(get_updater_max_count(), 1U); unsigned int updater_entry_count; std::vector<unsigned int> entry_read_count_list; unsigned int max_entry_read_count; if (updater_max_count > batch_size) updater_entry_count = batch_size; else { unsigned int it_count = (batch_size + updater_max_count - 1) / updater_max_count; updater_entry_count = (batch_size + it_count - 1) / it_count; max_entry_read_count = 
updater_entry_count; unsigned int sum = 0; while (sum < batch_size) { unsigned int new_item = ::min(batch_size - sum, updater_entry_count); sum += new_item; entry_read_count_list.push_back(new_item); } } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = get_data(data); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data_custom = set_get_data_custom(data); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > learning_rate_data = get_learning_rate(learning_rate); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > gradient = get_zero_gradient(net_data); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > previous_upd; if (momentum > 0.0F) previous_upd = get_zero_gradient(net_data); { buffer_cuda_size_configuration buffers_config; update_buffers_configuration(buffers_config, updater_entry_count); buffers_config.add_per_entry_linear_addressing_through_texture(layer_config_list[testing_layer_count].get_neuron_count()); // This is for the first updater to safely read input data through the texture buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error buffers_config.add_constant_buffer(sizeof(double) * updater_entry_count); // error buffer if (!random_uniform_list.empty()) buffers_config.add_constant_buffer(random_uniform_list.size() * sizeof(float)); // random_uniform_list for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data_custom.begin(); it != net_data_custom.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = learning_rate_data.begin(); it != learning_rate_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = gradient.begin(); it != gradient.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = previous_upd.begin(); it != previous_upd.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); unsigned int max_entry_count = ::min(::min(cuda_config->get_max_entry_count(buffers_config), 
reader.get_entry_count()), max_entry_count_in_single_batch); if (entry_read_count_list.empty() || (max_entry_count >= batch_size)) { unsigned int it_count = ::max((max_entry_count + batch_size - 1) / batch_size, 1U); max_entry_read_count = it_count * batch_size; entry_read_count_list.clear(); entry_read_count_list.push_back(max_entry_read_count); } } cuda_linear_buffer_device_smart_ptr input_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * input_neuron_elem_size)), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * input_neuron_elem_size)), }; cuda_linear_buffer_device_smart_ptr output_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_read_count * sizeof(float))), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_read_count * sizeof(float))), }; cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr error_buf(new cuda_linear_buffer_device(sizeof(double))); cuda_linear_buffer_device_smart_ptr random_uniform_buf; if (!random_uniform_list.empty()) { random_uniform_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(random_uniform_list.size() * sizeof(float))); cuda_safe_call(hipMemcpyAsync(*random_uniform_buf, &(*random_uniform_list.begin()), random_uniform_list.size() * sizeof(float), hipMemcpyHostToDevice, *command_stream)); } cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf; std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack; for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_read_count); testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers)); output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers); } std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it) { layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count); updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers)); output_buffer = all_buffers.output_neurons_buffer; } std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers; cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf; for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it) { output_errors_buffers.push_back(output_errors); layer_updater_cuda::buffer_set& all_buffers = it->second; if (all_buffers.input_errors_buffer != 0) output_errors = all_buffers.input_errors_buffer; } cuda_linear_buffer_host_smart_ptr input_host_buf(new 
cuda_linear_buffer_host(input_neuron_count * max_entry_read_count * input_neuron_elem_size)); unsigned char * input = *input_host_buf; cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_read_count * sizeof(float))); float * output = *output_host_buf; // set error to zero cuda_util::set_with_value( *cuda_config, (double *)(*error_buf), 0.0, 1, *command_stream); unsigned int current_data_slot = 0; unsigned int current_command_slot = 1; unsigned int entries_available_for_copy_in_count = reader.get_entry_count(); unsigned int entries_available_for_processing_count = 0; cuda_event data_processed_event; cuda_event input_copied_event; if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } random_generator gen = rnd::get_random_generator(); nnforge_uniform_int_distribution<unsigned int> dist(0, static_cast<unsigned int>(random_uniform_list.size() - 1)); unsigned int mask = static_cast<unsigned int>(random_uniform_list.size() - 1); unsigned int entries_processed_count = 0; unsigned int entry_read_count_index = 0; unsigned int entry_gradient_calculated_count = 0; while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0)) { supervised_data_reader_async_helper async_reader; if (entries_available_for_copy_in_count > 0) { unsigned int entries_to_read_count = std::min<unsigned int>(entry_read_count_list[entry_read_count_index], entries_available_for_copy_in_count); async_reader.fun = supervised_data_reader_functor( entries_to_read_count, &reader, input, output, *(input_buf[current_data_slot]), *(output_buf[current_data_slot]), *data_stream); async_reader.start(); entry_read_count_index++; if (entry_read_count_index >= entry_read_count_list.size()) entry_read_count_index = 0; } if (entries_available_for_processing_count > 0) { // Convert input if (type_code == neuron_data_type::type_byte) { int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( convert_compacted_to_raw_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream, *input_buf[current_command_slot], *input_converted_buf, elem_count); } else if (type_code == neuron_data_type::type_float) { cuda_safe_call(hipMemcpyAsync( *input_converted_buf, *input_buf[current_command_slot], input_neuron_count * entries_available_for_processing_count * sizeof(float), hipMemcpyDeviceToDevice, *command_stream)); } else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type %1%") % type_code).str()); // Run ann { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin(); unsigned int layer_id = 0; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin(); for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it) { std::map<unsigned int, float>::const_iterator dropout_it = 
layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, input_and_additional_buffers_pack_it->first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_it->get_neuron_count(), offset); } (*it)->enqueue_test( *command_stream, *schema_data_it, std::vector<const_cuda_linear_buffer_device_smart_ptr>(), std::vector<const_cuda_linear_buffer_device_smart_ptr>(), input_and_additional_buffers_pack_it->first, input_and_additional_buffers_pack_it->second, entries_available_for_processing_count); } } // Apply dropout to the input of the first updater layer { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(testing_layer_count); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, updater_input_and_all_buffers_pack[0].first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_list[testing_layer_count].get_neuron_count(), offset); } } unsigned int base_input_entry_id = 0; while(base_input_entry_id < entries_available_for_processing_count) { std::stack<unsigned int> offset_list; unsigned int current_updater_entry_count = ::min(::min(entries_available_for_processing_count - base_input_entry_id, updater_entry_count), batch_size - entry_gradient_calculated_count); // Forward updater { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_custom_it = net_data_custom.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin(); unsigned int layer_id = testing_layer_count; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++net_data_custom_it, ++layer_id, ++layer_config_it) { if (it != updater_list.begin()) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); offset_list.push(offset); enqueue_dropout( *command_stream, random_uniform_buf, input_and_all_buffers_pack_it->first, dropout_it->second, mask, current_updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_test( (it == updater_list.begin()) ? 
base_input_entry_id : 0, *command_stream, *schema_data_it, *net_data_it, *net_data_custom_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count); } } // Compute errors { if (error_function_fused_with_activation) ef_updater->enqueue_update_error_and_gradient_fused_with_activation( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, base_input_entry_id, output_neuron_count, current_updater_entry_count); else ef_updater->enqueue_update_error_and_gradient( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, base_input_entry_id, output_neuron_count, current_updater_entry_count); } // Backward updater { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin(); std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_custom_it = net_data_custom.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator gradient_it = gradient.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin(); unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1 - (error_function_fused_with_activation ? 1 : 0); layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + 1; for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++gradient_it, ++output_errors_it, ++net_data_it, ++net_data_custom_it, --reverse_layer_id, ++layer_config_it) { (*it)->enqueue_update_weights( (it == (updater_list.rend() - 1)) ? base_input_entry_id : 0, *command_stream, *gradient_it, *net_data_custom_it, *schema_data_it, *output_errors_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count); if (it != (updater_list.rend() - 1)) { (*it)->enqueue_backprop( *command_stream, *schema_data_it, *net_data_it, *net_data_custom_it, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->first, *output_errors_it, input_and_all_buffers_pack_it->second.input_errors_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count); /* { cuda_linear_buffer_device_smart_ptr buf = (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? 
*output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(hipMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_errors_%1%.txt") % reverse_layer_id).str()).string().c_str()); } */ std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(reverse_layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = offset_list.top(); offset_list.pop(); enqueue_dropout( *command_stream, random_uniform_buf, (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer, dropout_it->second, mask, current_updater_entry_count * layer_config_it->get_neuron_count(), offset); } } } } base_input_entry_id += current_updater_entry_count; entry_gradient_calculated_count += current_updater_entry_count; if (entry_gradient_calculated_count >= batch_size) { float gradient_normalizer = 1.0F / static_cast<float>(::max(batch_size, entry_gradient_calculated_count)); enqueue_apply_gradient( *command_stream, net_data, gradient, previous_upd, learning_rate_data, gradient_normalizer, weight_decay, momentum); entry_gradient_calculated_count = 0; } if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } } // while(base_input_entry_id < entries_available_for_processing_count) entries_processed_count += entries_available_for_processing_count; if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } } // if (entries_available_for_processing_count > 0) unsigned int entries_read_count = 0; if (entries_available_for_copy_in_count > 0) entries_read_count = async_reader.wait(); cuda_safe_call(hipStreamSynchronize(*data_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); entries_available_for_processing_count = entries_read_count; entries_available_for_copy_in_count -= entries_read_count; current_data_slot = 1 - current_data_slot; current_command_slot = 1 - current_command_slot; } if (entry_gradient_calculated_count > 0) { float gradient_normalizer = 1.0F / static_cast<float>(::max(batch_size, entry_gradient_calculated_count)); enqueue_apply_gradient( *command_stream, net_data, gradient, previous_upd, learning_rate_data, gradient_normalizer, weight_decay, momentum); entry_gradient_calculated_count = 0; } read_data(net_data, data, *command_stream); double error; cuda_safe_call(hipMemcpyAsync(&error, *error_buf, sizeof(double), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); res->init(error, entries_processed_count); return res; } void network_updater_cuda::layer_config_list_modified() { layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin(); tester_list.clear(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf) { tester_list.push_back( (*it)->create_tester( *it_conf, *(it_conf + 1))); } updater_list.clear(); 
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf) { updater_list.push_back( (*it)->create_updater( *it_conf, *(it_conf + 1), (it_conf > layer_config_list.begin() + testing_layer_count))); } } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_data(network_data_const_smart_ptr data) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_data(data->data_list[i + testing_layer_count]); res.push_back(device_data); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::set_get_data_custom(network_data_const_smart_ptr data) { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->set_get_data_custom(data->data_custom_list[i + testing_layer_count]); res.push_back(device_data); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_zero_gradient(const std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = data.begin(); it != data.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data; for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) { size_t buf_size = (*it2)->get_size(); cuda_linear_buffer_device_smart_ptr buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(buf_size)); cuda_util::set_with_value( *cuda_config, *buf, 0.0F, buf_size / sizeof(float), 0); device_data.push_back(buf); } res.push_back(device_data); } cuda_safe_call(hipStreamSynchronize(0)); return res; } std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_learning_rate(const layer_data_list& learning_rate) const { std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<const_cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_learning_rate(learning_rate[i + testing_layer_count]); res.push_back(device_data); } return res; } void network_updater_cuda::read_data( std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list, network_data_smart_ptr res, hipStream_t stream_id) const { unsigned int layer_id = 0; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id) updater_list[layer_id]->get_data_from_device(*src_it, res->data_list[layer_id + testing_layer_count]); } void network_updater_cuda::update_buffers_configuration( buffer_cuda_size_configuration& buffer_configuration, unsigned int updater_entry_count) const { for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != testing_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = 
tester_list.begin(); it != tester_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, updater_entry_count); } unsigned int network_updater_cuda::get_updater_max_count() const { buffer_cuda_size_configuration buffer_configuration; for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); return cuda_config->get_max_entry_count(buffer_configuration, 0.5F); } void network_updater_cuda::enqueue_dropout( hipStream_t stream_id, const_cuda_linear_buffer_device_smart_ptr random_buffer, cuda_linear_buffer_device_smart_ptr target_buffer, float dropout_rate, unsigned int mask, unsigned int elem_count, unsigned int offset_in_random_list) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( dropout_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *target_buffer, *random_buffer, dropout_rate, offset_in_random_list, mask, elem_count); } void network_updater_cuda::enqueue_apply_gradient( hipStream_t stream_id, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& gradient, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& prev_upd, std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >& learning_rate, float gradient_normalizer, float weight_decay, float momentum) { const const_layer_list& layer_list = *schema; if (momentum> 0.0F) { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator gradient_it = gradient.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator prev_upd_it = prev_upd.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator learning_rate_it = learning_rate.begin(); const_layer_list::const_iterator layer_it = layer_list.begin() + testing_layer_count; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator data_it = data.begin(); data_it != data.end(); ++data_it, ++gradient_it, ++prev_upd_it, ++learning_rate_it, ++layer_it) { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator gradient_it2 = gradient_it->begin(); std::vector<cuda_linear_buffer_device_smart_ptr>::iterator prev_upd_it2 = prev_upd_it->begin(); std::vector<const_cuda_linear_buffer_device_smart_ptr>::iterator learning_rate_it2 = learning_rate_it->begin(); std::set<unsigned int> weight_decay_part_id_set = (*layer_it)->get_weight_decay_part_id_set(); unsigned int part_id = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator data_it2 = data_it->begin(); data_it2 != data_it->end(); ++data_it2, ++gradient_it2, ++prev_upd_it2, ++learning_rate_it2, ++part_id) { float actual_weight_decay = (weight_decay_part_id_set.find(part_id) == weight_decay_part_id_set.end()) ? 
0.0F : weight_decay; int elem_count = (*data_it2)->get_size() / sizeof(float); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( apply_gradient_with_momentum_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, **data_it2, **gradient_it2, **prev_upd_it2, **learning_rate_it2, gradient_normalizer, actual_weight_decay, momentum, elem_count); } } } else { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator gradient_it = gradient.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator learning_rate_it = learning_rate.begin(); const_layer_list::const_iterator layer_it = layer_list.begin() + testing_layer_count; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator data_it = data.begin(); data_it != data.end(); ++data_it, ++gradient_it, ++learning_rate_it, ++layer_it) { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator gradient_it2 = gradient_it->begin(); std::vector<const_cuda_linear_buffer_device_smart_ptr>::iterator learning_rate_it2 = learning_rate_it->begin(); std::set<unsigned int> weight_decay_part_id_set = (*layer_it)->get_weight_decay_part_id_set(); unsigned int part_id = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator data_it2 = data_it->begin(); data_it2 != data_it->end(); ++data_it2, ++gradient_it2, ++learning_rate_it2, ++part_id) { float actual_weight_decay = (weight_decay_part_id_set.find(part_id) == weight_decay_part_id_set.end()) ? 0.0F : weight_decay; int elem_count = (*data_it2)->get_size() / sizeof(float); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( apply_gradient_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, **data_it2, **gradient_it2, **learning_rate_it2, gradient_normalizer, actual_weight_decay, elem_count); } } } } } }
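// ---------------------------------------------------------------------------
// Editor's note: a minimal stand-alone sketch (not part of either file in this
// pair) illustrating the mechanical translation hipify applied above: CUDA's
// <<<grid, block, shmem, stream>>> launch syntax becomes hipLaunchKernelGGL,
// and cuda* runtime calls are renamed to their hip* equivalents. The kernel
// and function names here are made up for illustration only.
// ---------------------------------------------------------------------------
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float * __restrict data, float factor, int elem_count)
{
    int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
    if (elem_id < elem_count)
        data[elem_id] *= factor;
}

void enqueue_scale(float *dev_data, float factor, int elem_count, hipStream_t stream)
{
    dim3 block(256);
    dim3 grid((elem_count + block.x - 1) / block.x);

    // CUDA form, as written in the .cu version below:
    //   scale_kernel<<<grid, block, 0, stream>>>(dev_data, factor, elem_count);
    //
    // HIP form produced by hipify, as in the .hip version above:
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream,
        dev_data, factor, elem_count);

    // Runtime API calls translate one-for-one in the same way, e.g.
    // cudaMemcpyAsync -> hipMemcpyAsync, cudaStreamSynchronize ->
    // hipStreamSynchronize, cudaEventRecord -> hipEventRecord.
}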
a9e9e645f5c1fd765a63e673223d8ec54df26259.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "network_updater_cuda.h" #include "neural_network_cuda_exception.h" #include "layer_testing_schema_factory.h" #include "cuda_linear_buffer_device.h" #include "cuda_linear_buffer_host.h" #include "util_cuda.h" #include "cuda_event.h" #include "layer_updater_schema_factory.h" #include "supervised_data_reader_async_helper.h" #include "error_function_updater_cuda_factory.h" #include "../nn_types.h" #include <cuda_runtime.h> #include <boost/format.hpp> #include <stack> #include "../debug_util.h" #include <boost/filesystem.hpp> namespace nnforge { namespace cuda { __global__ void convert_compacted_to_raw_upd_kernel( const uchar4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { uchar4 inp = input[elem_id]; float4 val; val.x = inp.x * (1.0F / 255.0F); val.y = inp.y * (1.0F / 255.0F); val.z = inp.z * (1.0F / 255.0F); val.w = inp.w * (1.0F / 255.0F); output[elem_id] = val; } } __global__ void dropout_kernel( float * __restrict neurons, const float * __restrict random_buf, float dropout_rate, int offset, unsigned int mask, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { unsigned int random_elem_id = (elem_id + offset) & mask; if (random_buf[random_elem_id] < dropout_rate) neurons[elem_id] = 0.0F; } } __global__ void apply_gradient_kernel( float * __restrict data, float * __restrict gradient, const float * __restrict learning_rate, float normalizer, float weight_decay, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float current_weight = data[elem_id]; float lr = learning_rate[elem_id]; float gr = gradient[elem_id]; float new_weight = current_weight + lr * (gr * normalizer - current_weight * weight_decay); data[elem_id] = new_weight; gradient[elem_id] = 0.0F; } } __global__ void apply_gradient_with_momentum_kernel( float * __restrict data, float * __restrict gradient, float * __restrict previous_upd, const float * __restrict learning_rate, float normalizer, float weight_decay, float momentum, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float current_weight = data[elem_id]; float lr = learning_rate[elem_id]; float gr = gradient[elem_id]; float prev_upd = previous_upd[elem_id]; float upd = prev_upd * momentum + lr * (gr * normalizer - current_weight * weight_decay); float new_weight = current_weight + upd; data[elem_id] = new_weight; gradient[elem_id] = 0.0F; previous_upd[elem_id] = upd; } } unsigned int network_updater_cuda::max_entry_count_in_single_batch = 1024; network_updater_cuda::network_updater_cuda( network_schema_smart_ptr schema, const_error_function_smart_ptr ef, const std::map<unsigned int, float>& layer_to_dropout_rate_map, 
cuda_running_configuration_const_smart_ptr cuda_config) : network_updater(schema, ef, layer_to_dropout_rate_map) , cuda_config(cuda_config) { const const_layer_list& layer_list = *schema; testing_layer_count = 0; start_layer_nonempty_weights_iterator = layer_list.begin(); for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it) { start_layer_nonempty_weights_iterator = it; if (!(*it)->is_empty_data()) break; testing_layer_count++; } ef_updater = single_error_function_updater_cuda_factory::get_const_instance().get_error_function_updater_cuda(ef->get_uuid()); error_function_fused_with_activation = (ef_updater->get_fusable_activation_uuid() == layer_list.back()->get_uuid()); for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it) testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config)); for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it) { if ((it != layer_list.end() - 1) || (!error_function_fused_with_activation)) updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config)); } setup_network_cuda(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it) testing_schema_data.push_back((*it)->get_schema_buffers()); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it) updater_schema_data.push_back((*it)->get_schema_buffers()); } network_updater_cuda::~network_updater_cuda() { } void network_updater_cuda::setup_network_cuda() { command_stream = cuda_stream_smart_ptr(new cuda_stream()); data_stream = cuda_stream_smart_ptr(new cuda_stream()); } testing_result_smart_ptr network_updater_cuda::actual_update( supervised_data_reader& reader, const layer_data_list& learning_rate, network_data_smart_ptr data, unsigned int batch_size, float weight_decay, float momentum) { testing_result_smart_ptr res(new testing_result(ef)); reader.reset(); layer_configuration_specific input_configuration = reader.get_input_configuration(); layer_configuration_specific output_configuration = reader.get_output_configuration(); unsigned int input_neuron_count = input_configuration.get_neuron_count(); unsigned int output_neuron_count = output_configuration.get_neuron_count(); unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map(); unsigned int output_neuron_count_per_feature_map = output_configuration.get_neuron_count_per_feature_map(); neuron_data_type::input_type type_code = reader.get_input_type(); size_t input_neuron_elem_size = reader.get_input_neuron_elem_size(); if (error_function_fused_with_activation && (output_neuron_count_per_feature_map != 1)) throw neural_network_exception("Error function is fused with activation but output_neuron_count_per_feature_map is not equal 1: not implemented"); unsigned int updater_max_count = std::max(get_updater_max_count(), 1U); unsigned int updater_entry_count; std::vector<unsigned int> entry_read_count_list; unsigned int max_entry_read_count; if (updater_max_count > batch_size) updater_entry_count = batch_size; else { unsigned int it_count = (batch_size + updater_max_count - 1) / updater_max_count; updater_entry_count = (batch_size + it_count - 1) / it_count; max_entry_read_count = updater_entry_count; unsigned int sum = 
0; while (sum < batch_size) { unsigned int new_item = std::min(batch_size - sum, updater_entry_count); sum += new_item; entry_read_count_list.push_back(new_item); } } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = get_data(data); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data_custom = set_get_data_custom(data); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > learning_rate_data = get_learning_rate(learning_rate); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > gradient = get_zero_gradient(net_data); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > previous_upd; if (momentum > 0.0F) previous_upd = get_zero_gradient(net_data); { buffer_cuda_size_configuration buffers_config; update_buffers_configuration(buffers_config, updater_entry_count); buffers_config.add_per_entry_linear_addressing_through_texture(layer_config_list[testing_layer_count].get_neuron_count()); // This is for the first updater to safely read input data through the texture buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error buffers_config.add_constant_buffer(sizeof(double) * updater_entry_count); // error buffer if (!random_uniform_list.empty()) buffers_config.add_constant_buffer(random_uniform_list.size() * sizeof(float)); // random_uniform_list for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data_custom.begin(); it != net_data_custom.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = learning_rate_data.begin(); it != learning_rate_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = gradient.begin(); it != gradient.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = previous_upd.begin(); it != previous_upd.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); unsigned int max_entry_count = std::min(std::min(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), 
max_entry_count_in_single_batch); if (entry_read_count_list.empty() || (max_entry_count >= batch_size)) { unsigned int it_count = std::max((max_entry_count + batch_size - 1) / batch_size, 1U); max_entry_read_count = it_count * batch_size; entry_read_count_list.clear(); entry_read_count_list.push_back(max_entry_read_count); } } cuda_linear_buffer_device_smart_ptr input_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * input_neuron_elem_size)), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * input_neuron_elem_size)), }; cuda_linear_buffer_device_smart_ptr output_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_read_count * sizeof(float))), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_read_count * sizeof(float))), }; cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr error_buf(new cuda_linear_buffer_device(sizeof(double))); cuda_linear_buffer_device_smart_ptr random_uniform_buf; if (!random_uniform_list.empty()) { random_uniform_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(random_uniform_list.size() * sizeof(float))); cuda_safe_call(cudaMemcpyAsync(*random_uniform_buf, &(*random_uniform_list.begin()), random_uniform_list.size() * sizeof(float), cudaMemcpyHostToDevice, *command_stream)); } cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf; std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack; for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_read_count); testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers)); output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers); } std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it) { layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count); updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers)); output_buffer = all_buffers.output_neurons_buffer; } std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers; cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf; for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it) { output_errors_buffers.push_back(output_errors); layer_updater_cuda::buffer_set& all_buffers = it->second; if (all_buffers.input_errors_buffer != 0) output_errors = all_buffers.input_errors_buffer; } cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * 
max_entry_read_count * input_neuron_elem_size)); unsigned char * input = *input_host_buf; cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_read_count * sizeof(float))); float * output = *output_host_buf; // set error to zero cuda_util::set_with_value( *cuda_config, (double *)(*error_buf), 0.0, 1, *command_stream); unsigned int current_data_slot = 0; unsigned int current_command_slot = 1; unsigned int entries_available_for_copy_in_count = reader.get_entry_count(); unsigned int entries_available_for_processing_count = 0; cuda_event data_processed_event; cuda_event input_copied_event; if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } random_generator gen = rnd::get_random_generator(); nnforge_uniform_int_distribution<unsigned int> dist(0, static_cast<unsigned int>(random_uniform_list.size() - 1)); unsigned int mask = static_cast<unsigned int>(random_uniform_list.size() - 1); unsigned int entries_processed_count = 0; unsigned int entry_read_count_index = 0; unsigned int entry_gradient_calculated_count = 0; while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0)) { supervised_data_reader_async_helper async_reader; if (entries_available_for_copy_in_count > 0) { unsigned int entries_to_read_count = std::min<unsigned int>(entry_read_count_list[entry_read_count_index], entries_available_for_copy_in_count); async_reader.fun = supervised_data_reader_functor( entries_to_read_count, &reader, input, output, *(input_buf[current_data_slot]), *(output_buf[current_data_slot]), *data_stream); async_reader.start(); entry_read_count_index++; if (entry_read_count_index >= entry_read_count_list.size()) entry_read_count_index = 0; } if (entries_available_for_processing_count > 0) { // Convert input if (type_code == neuron_data_type::type_byte) { int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); convert_compacted_to_raw_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>( *input_buf[current_command_slot], *input_converted_buf, elem_count); } else if (type_code == neuron_data_type::type_float) { cuda_safe_call(cudaMemcpyAsync( *input_converted_buf, *input_buf[current_command_slot], input_neuron_count * entries_available_for_processing_count * sizeof(float), cudaMemcpyDeviceToDevice, *command_stream)); } else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type %1%") % type_code).str()); // Run ann { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin(); unsigned int layer_id = 0; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin(); for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != 
layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, input_and_additional_buffers_pack_it->first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_it->get_neuron_count(), offset); } (*it)->enqueue_test( *command_stream, *schema_data_it, std::vector<const_cuda_linear_buffer_device_smart_ptr>(), std::vector<const_cuda_linear_buffer_device_smart_ptr>(), input_and_additional_buffers_pack_it->first, input_and_additional_buffers_pack_it->second, entries_available_for_processing_count); } } // Apply dropout to the input of the first updater layer { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(testing_layer_count); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, updater_input_and_all_buffers_pack[0].first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_list[testing_layer_count].get_neuron_count(), offset); } } unsigned int base_input_entry_id = 0; while(base_input_entry_id < entries_available_for_processing_count) { std::stack<unsigned int> offset_list; unsigned int current_updater_entry_count = std::min(std::min(entries_available_for_processing_count - base_input_entry_id, updater_entry_count), batch_size - entry_gradient_calculated_count); // Forward updater { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_custom_it = net_data_custom.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin(); unsigned int layer_id = testing_layer_count; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++net_data_custom_it, ++layer_id, ++layer_config_it) { if (it != updater_list.begin()) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); offset_list.push(offset); enqueue_dropout( *command_stream, random_uniform_buf, input_and_all_buffers_pack_it->first, dropout_it->second, mask, current_updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_test( (it == updater_list.begin()) ? 
base_input_entry_id : 0, *command_stream, *schema_data_it, *net_data_it, *net_data_custom_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count); } } // Compute errors { if (error_function_fused_with_activation) ef_updater->enqueue_update_error_and_gradient_fused_with_activation( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, base_input_entry_id, output_neuron_count, current_updater_entry_count); else ef_updater->enqueue_update_error_and_gradient( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, base_input_entry_id, output_neuron_count, current_updater_entry_count); } // Backward updater { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin(); std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_custom_it = net_data_custom.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator gradient_it = gradient.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin(); unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1 - (error_function_fused_with_activation ? 1 : 0); layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + 1; for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++gradient_it, ++output_errors_it, ++net_data_it, ++net_data_custom_it, --reverse_layer_id, ++layer_config_it) { (*it)->enqueue_update_weights( (it == (updater_list.rend() - 1)) ? base_input_entry_id : 0, *command_stream, *gradient_it, *net_data_custom_it, *schema_data_it, *output_errors_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count); if (it != (updater_list.rend() - 1)) { (*it)->enqueue_backprop( *command_stream, *schema_data_it, *net_data_it, *net_data_custom_it, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->first, *output_errors_it, input_and_all_buffers_pack_it->second.input_errors_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count); /* { cuda_linear_buffer_device_smart_ptr buf = (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? 
*output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(cudaMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_errors_%1%.txt") % reverse_layer_id).str()).string().c_str()); } */ std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(reverse_layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = offset_list.top(); offset_list.pop(); enqueue_dropout( *command_stream, random_uniform_buf, (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer, dropout_it->second, mask, current_updater_entry_count * layer_config_it->get_neuron_count(), offset); } } } } base_input_entry_id += current_updater_entry_count; entry_gradient_calculated_count += current_updater_entry_count; if (entry_gradient_calculated_count >= batch_size) { float gradient_normalizer = 1.0F / static_cast<float>(std::max(batch_size, entry_gradient_calculated_count)); enqueue_apply_gradient( *command_stream, net_data, gradient, previous_upd, learning_rate_data, gradient_normalizer, weight_decay, momentum); entry_gradient_calculated_count = 0; } if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } } // while(base_input_entry_id < entries_available_for_processing_count) entries_processed_count += entries_available_for_processing_count; if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } } // if (entries_available_for_processing_count > 0) unsigned int entries_read_count = 0; if (entries_available_for_copy_in_count > 0) entries_read_count = async_reader.wait(); cuda_safe_call(cudaStreamSynchronize(*data_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); entries_available_for_processing_count = entries_read_count; entries_available_for_copy_in_count -= entries_read_count; current_data_slot = 1 - current_data_slot; current_command_slot = 1 - current_command_slot; } if (entry_gradient_calculated_count > 0) { float gradient_normalizer = 1.0F / static_cast<float>(std::max(batch_size, entry_gradient_calculated_count)); enqueue_apply_gradient( *command_stream, net_data, gradient, previous_upd, learning_rate_data, gradient_normalizer, weight_decay, momentum); entry_gradient_calculated_count = 0; } read_data(net_data, data, *command_stream); double error; cuda_safe_call(cudaMemcpyAsync(&error, *error_buf, sizeof(double), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); res->init(error, entries_processed_count); return res; } void network_updater_cuda::layer_config_list_modified() { layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin(); tester_list.clear(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf) { tester_list.push_back( (*it)->create_tester( *it_conf, *(it_conf + 1))); } 
updater_list.clear(); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf) { updater_list.push_back( (*it)->create_updater( *it_conf, *(it_conf + 1), (it_conf > layer_config_list.begin() + testing_layer_count))); } } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_data(network_data_const_smart_ptr data) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_data(data->data_list[i + testing_layer_count]); res.push_back(device_data); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::set_get_data_custom(network_data_const_smart_ptr data) { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->set_get_data_custom(data->data_custom_list[i + testing_layer_count]); res.push_back(device_data); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_zero_gradient(const std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = data.begin(); it != data.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data; for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) { size_t buf_size = (*it2)->get_size(); cuda_linear_buffer_device_smart_ptr buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(buf_size)); cuda_util::set_with_value( *cuda_config, *buf, 0.0F, buf_size / sizeof(float), 0); device_data.push_back(buf); } res.push_back(device_data); } cuda_safe_call(cudaStreamSynchronize(0)); return res; } std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_learning_rate(const layer_data_list& learning_rate) const { std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<const_cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_learning_rate(learning_rate[i + testing_layer_count]); res.push_back(device_data); } return res; } void network_updater_cuda::read_data( std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list, network_data_smart_ptr res, cudaStream_t stream_id) const { unsigned int layer_id = 0; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id) updater_list[layer_id]->get_data_from_device(*src_it, res->data_list[layer_id + testing_layer_count]); } void network_updater_cuda::update_buffers_configuration( buffer_cuda_size_configuration& buffer_configuration, unsigned int updater_entry_count) const { for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != testing_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); 
for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, updater_entry_count); } unsigned int network_updater_cuda::get_updater_max_count() const { buffer_cuda_size_configuration buffer_configuration; for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); return cuda_config->get_max_entry_count(buffer_configuration, 0.5F); } void network_updater_cuda::enqueue_dropout( cudaStream_t stream_id, const_cuda_linear_buffer_device_smart_ptr random_buffer, cuda_linear_buffer_device_smart_ptr target_buffer, float dropout_rate, unsigned int mask, unsigned int elem_count, unsigned int offset_in_random_list) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); dropout_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *target_buffer, *random_buffer, dropout_rate, offset_in_random_list, mask, elem_count); } void network_updater_cuda::enqueue_apply_gradient( cudaStream_t stream_id, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& gradient, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& prev_upd, std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >& learning_rate, float gradient_normalizer, float weight_decay, float momentum) { const const_layer_list& layer_list = *schema; if (momentum> 0.0F) { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator gradient_it = gradient.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator prev_upd_it = prev_upd.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator learning_rate_it = learning_rate.begin(); const_layer_list::const_iterator layer_it = layer_list.begin() + testing_layer_count; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator data_it = data.begin(); data_it != data.end(); ++data_it, ++gradient_it, ++prev_upd_it, ++learning_rate_it, ++layer_it) { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator gradient_it2 = gradient_it->begin(); std::vector<cuda_linear_buffer_device_smart_ptr>::iterator prev_upd_it2 = prev_upd_it->begin(); std::vector<const_cuda_linear_buffer_device_smart_ptr>::iterator learning_rate_it2 = learning_rate_it->begin(); std::set<unsigned int> weight_decay_part_id_set = (*layer_it)->get_weight_decay_part_id_set(); unsigned int part_id = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator data_it2 = data_it->begin(); data_it2 != data_it->end(); ++data_it2, ++gradient_it2, ++prev_upd_it2, ++learning_rate_it2, ++part_id) { float actual_weight_decay = (weight_decay_part_id_set.find(part_id) == weight_decay_part_id_set.end()) ? 
0.0F : weight_decay; int elem_count = (*data_it2)->get_size() / sizeof(float); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); apply_gradient_with_momentum_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( **data_it2, **gradient_it2, **prev_upd_it2, **learning_rate_it2, gradient_normalizer, actual_weight_decay, momentum, elem_count); } } } else { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator gradient_it = gradient.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator learning_rate_it = learning_rate.begin(); const_layer_list::const_iterator layer_it = layer_list.begin() + testing_layer_count; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator data_it = data.begin(); data_it != data.end(); ++data_it, ++gradient_it, ++learning_rate_it, ++layer_it) { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator gradient_it2 = gradient_it->begin(); std::vector<const_cuda_linear_buffer_device_smart_ptr>::iterator learning_rate_it2 = learning_rate_it->begin(); std::set<unsigned int> weight_decay_part_id_set = (*layer_it)->get_weight_decay_part_id_set(); unsigned int part_id = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator data_it2 = data_it->begin(); data_it2 != data_it->end(); ++data_it2, ++gradient_it2, ++learning_rate_it2, ++part_id) { float actual_weight_decay = (weight_decay_part_id_set.find(part_id) == weight_decay_part_id_set.end()) ? 0.0F : weight_decay; int elem_count = (*data_it2)->get_size() / sizeof(float); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); apply_gradient_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( **data_it2, **gradient_it2, **learning_rate_it2, gradient_normalizer, actual_weight_decay, elem_count); } } } } } }
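The bodies of apply_gradient_kernel and apply_gradient_with_momentum_kernel are defined elsewhere in the project and are not part of this file. For orientation only, a per-element momentum update with the argument list used above (data, gradient, previous update, per-element learning rate, normalizer, weight decay, momentum, element count) conventionally has the shape sketched below; this is a sketch of the standard SGD-with-momentum rule, not the actual nnForge kernel, and its sign conventions and weight-decay handling may differ from the real implementation.

// Illustrative sketch only -- NOT the actual kernel used above, whose definition
// is not part of this excerpt. Assumes conventional SGD-with-momentum semantics.
__global__ void apply_gradient_with_momentum_sketch(
    float* data,
    const float* gradient,
    float* previous_upd,
    const float* learning_rate,
    float gradient_normalizer,
    float weight_decay,
    float momentum,
    int elem_count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < elem_count)
    {
        // Average the accumulated gradient over the batch and add weight decay.
        float grad = gradient[i] * gradient_normalizer + weight_decay * data[i];
        // Blend with the previous update, store it back, and apply to the weights.
        float upd = momentum * previous_upd[i] - learning_rate[i] * grad;
        previous_upd[i] = upd;
        data[i] += upd;
    }
}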
4fa74b60c551e87fdf0946dc5f746e973e37b870.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void InvolveVector(float* input, float* output, int inputSize)
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x // rows preceding current row in grid
                 + blockDim.x*blockIdx.x           // blocks preceding current block
                 + threadIdx.x;
    if(threadId < inputSize - 1)
    {
        output[0] = input[0];
        output[threadId + 1] = input[inputSize - threadId - 1];
    }
}
4fa74b60c551e87fdf0946dc5f746e973e37b870.cu
#include "includes.h" __global__ void InvolveVector(float* input, float* output, int inputSize) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < inputSize - 1) { output[0] = input[0]; output[threadId + 1] = input[inputSize - threadId - 1]; } }
4855a23f7428d8dc4502813d2fbcd0f0bde2e272.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include "basics/tensor.cu" #include "basics/session.hpp" #include "layers/data.cu" #include "layers/softmax.cu" #include "layers/cross_entropy_loss.cu" #include "layers/pooling.cu" #include "layers/conv2d.cu" #include "layers/relu.cu" #include "layers/fc.cu" #include "utils/bitmap_image.hpp" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "utils/helper_cuda.h" #include "utils/utils.cu" #include "utils/load_model.hpp" void demo_cifar10_gpu() { printf("Start demo cifar10 on GPU\n\n"); hipError_t cudaStatus = hipSetDevice(0); checkCudaErrors(cudaStatus); startTimer(); Session* session = Session::GetNewSession(); session->gpu = true; session->batch_size = 1; size_t batch_size = session->batch_size; Data<float> data_layer(batch_size, "datasets/cifar10/train.txt"); // vector<size_t*> data_tops_dims; size_t data_tops_dims0[4]; size_t data_tops_dims1[4]; data_layer.GetTopsDims({}, {data_tops_dims0, data_tops_dims1}); std::vector<Tensor<float>*> data_tops; data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims0)); data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims1)); Conv2D<float> conv1(5,5,3,32,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv1_top_dims[4]; conv1.GetTopsDims({data_tops_dims0}, {conv1_top_dims}); Tensor<float> * conv1_top = Tensor<float>::CreateTensorGPU(conv1_top_dims); Pooling<float> pool1(2, MAX, 2); size_t pool1_top_dims[4]; pool1.GetTopsDims({conv1_top_dims}, {pool1_top_dims}); Tensor<float> * pool1_top = Tensor<float>::CreateTensorGPU(pool1_top_dims); Relu<float> relu1; size_t relu1_top_dims[4]; relu1.GetTopsDims({pool1_top_dims}, {relu1_top_dims}); Tensor<float> * relu1_top = Tensor<float>::CreateTensorGPU(relu1_top_dims); Conv2D<float> conv2(5,5,32,32,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv2_top_dims[4]; conv2.GetTopsDims({relu1_top_dims}, {conv2_top_dims}); Tensor<float> * conv2_top = Tensor<float>::CreateTensorGPU(conv2_top_dims); Pooling<float> pool2(2, MAX, 2); size_t pool2_top_dims[4]; pool2.GetTopsDims({conv2_top_dims}, {pool2_top_dims}); Tensor<float> * pool2_top = Tensor<float>::CreateTensorGPU(pool2_top_dims); Relu<float> relu2; size_t relu2_top_dims[4]; relu2.GetTopsDims({pool2_top_dims}, {relu2_top_dims}); Tensor<float> * relu2_top = Tensor<float>::CreateTensorGPU(relu2_top_dims); Conv2D<float> conv3(5,5,32,64,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv3_top_dims[4]; conv3.GetTopsDims({relu2_top_dims}, {conv3_top_dims}); Tensor<float> * conv3_top = Tensor<float>::CreateTensorGPU(conv3_top_dims); Pooling<float> pool3(2, MAX, 2); size_t pool3_top_dims[4]; pool3.GetTopsDims({conv3_top_dims}, {pool3_top_dims}); Tensor<float> * pool3_top = Tensor<float>::CreateTensorGPU(pool3_top_dims); Relu<float> relu3; size_t relu3_top_dims[4]; relu3.GetTopsDims({pool3_top_dims}, {relu3_top_dims}); Tensor<float> * relu3_top = Tensor<float>::CreateTensorGPU(relu3_top_dims); size_t to_fc4_dims[4]; to_fc4_dims[0] = relu3_top_dims[0]; to_fc4_dims[1] = 1; to_fc4_dims[2] = 1; to_fc4_dims[3] = relu3_top_dims[1]*relu3_top_dims[2]*relu3_top_dims[3]; FC<float> fc4(to_fc4_dims[3],64); size_t fc4_top_dims[4]; fc4.GetTopsDims({to_fc4_dims}, {fc4_top_dims}); Tensor<float> * fc4_top = Tensor<float>::CreateTensorGPU(fc4_top_dims); FC<float> fc5(64, 10); size_t fc5_top_dims[4]; fc5.GetTopsDims({fc4_top_dims}, {fc5_top_dims}); Tensor<float> * fc5_top = 
Tensor<float>::CreateTensorGPU(fc5_top_dims); Softmax<float> softmax; size_t sm_top_dims[4]; softmax.GetTopsDims({fc5_top_dims}, {sm_top_dims}); Tensor<float> * sm_top = Tensor<float>::CreateTensorGPU(sm_top_dims); printf("network finished setup: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); cudaStatus = hipGetLastError(); checkCudaErrors(cudaStatus); printf("Loading weights ...\n"); std::string model_path = "models/cifar10/model.txt"; std::ifstream file(model_path); size_t conv1_w_dims[4] = {5,5,3,32}; Tensor<float>* conv1_w = Tensor<float>::CreateTensorCPU(conv1_w_dims); load_to_conv<float>(conv1_w, file); Tensor<float>::DataArrayCPUtoGPU(conv1_w, conv1.W_); size_t conv1_b_dims[4] = {1,1,1,32}; Tensor<float>* conv1_b = Tensor<float>::CreateTensorCPU(conv1_b_dims); load_to_bias<float>(conv1_b, file); Tensor<float>::DataArrayCPUtoGPU(conv1_b, conv1.b_); size_t conv2_w_dims[4] = {5,5,32,32}; Tensor<float>* conv2_w = Tensor<float>::CreateTensorCPU(conv2_w_dims); load_to_conv<float>(conv2_w, file); Tensor<float>::DataArrayCPUtoGPU(conv2_w, conv2.W_); size_t conv2_b_dims[4] = {1,1,1,32}; Tensor<float>* conv2_b = Tensor<float>::CreateTensorCPU(conv2_b_dims); load_to_bias<float>(conv2_b, file); Tensor<float>::DataArrayCPUtoGPU(conv2_b, conv2.b_); size_t conv3_w_dims[4] = {5,5,32,64}; Tensor<float>* conv3_w = Tensor<float>::CreateTensorCPU(conv3_w_dims); load_to_conv<float>(conv3_w, file); Tensor<float>::DataArrayCPUtoGPU(conv3_w, conv3.W_); size_t conv3_b_dims[4] = {1,1,1,64}; Tensor<float>* conv3_b = Tensor<float>::CreateTensorCPU(conv3_b_dims); load_to_bias<float>(conv3_b, file); Tensor<float>::DataArrayCPUtoGPU(conv3_b, conv3.b_); size_t fc4_w_dims[4] = {1,1,64,1024}; Tensor<float>* fc4_w = Tensor<float>::CreateTensorCPU(fc4_w_dims); load_to_fc<float>(fc4_w, file); Tensor<float>::DataArrayCPUtoGPU(fc4_w, fc4.W_); size_t fc4_b_dims[4] = {1,1,1,64}; Tensor<float>* fc4_b = Tensor<float>::CreateTensorCPU(fc4_b_dims); load_to_bias<float>(fc4_b, file); Tensor<float>::DataArrayCPUtoGPU(fc4_b, fc4.b_); size_t fc5_w_dims[4] = {1,1,10,64}; Tensor<float>* fc5_w = Tensor<float>::CreateTensorCPU(fc5_w_dims); load_to_fc<float>(fc5_w, file); Tensor<float>::DataArrayCPUtoGPU(fc5_w, fc5.W_); size_t fc5_b_dims[4] = {1,1,1,10}; Tensor<float>* fc5_b = Tensor<float>::CreateTensorCPU(fc5_b_dims); load_to_bias<float>(fc5_b, file); Tensor<float>::DataArrayCPUtoGPU(fc5_b, fc5.b_); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); float data_t = stopTimer(); printf("data forward: %3.1f ms \n", data_t); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); float conv1_t = stopTimer(); printf("conv1 forward: %3.1f ms \n", conv1_t); startTimer(); pool1.Forward({conv1_top}, {pool1_top}); float pool1_t = stopTimer(); printf("pool1 forward: %3.1f ms \n", pool1_t); startTimer(); relu1.Forward({pool1_top}, {relu1_top}); float relu1_t = stopTimer(); printf("relu1 forward: %3.1f ms \n", relu1_t); startTimer(); conv2.Forward({relu1_top}, {conv2_top}); float conv2_t = stopTimer(); printf("conv2 forward: %3.1f ms \n", conv2_t); startTimer(); pool2.Forward({conv2_top}, {pool2_top}); float pool2_t = stopTimer(); printf("pool2 forward: %3.1f ms \n", pool2_t); startTimer(); relu2.Forward({pool2_top}, {relu2_top}); float relu2_t = stopTimer(); printf("relu2 forward: %3.1f ms \n", relu2_t); startTimer(); conv3.Forward({relu2_top}, {conv3_top}); float conv3_t = stopTimer(); printf("conv3 forward: %3.1f ms \n", conv3_t); startTimer(); pool3.Forward({conv3_top}, {pool3_top}); float pool3_t = stopTimer(); 
printf("pool3 forward: %3.1f ms \n", pool3_t); startTimer(); relu3.Forward({pool3_top}, {relu3_top}); float relu3_t = stopTimer(); printf("relu3 forward: %3.1f ms \n", relu3_t); startTimer(); // flatten the tensor size_t relu3_top_dims_reshaped[4] = {relu3_top_dims[0], relu3_top_dims[3], relu3_top_dims[1], relu3_top_dims[2]}; Tensor<float> * reshaped_relu3_top_cpu = Tensor<float>::CreateTensorCPU(relu3_top_dims_reshaped); Tensor<float> * relu3_top_cpu = Tensor<float>::TensorGPUtoCPU(relu3_top); for(int b = 0; b < relu3_top_dims_reshaped[0]; b++) { for(int c = 0; c < relu3_top_dims_reshaped[1]; c++) { for(int h = 0; h < relu3_top_dims_reshaped[2]; h++) { for(int w = 0; w < relu3_top_dims_reshaped[3]; w++) { reshaped_relu3_top_cpu->at(b, c, h, w) = relu3_top_cpu->at(b, h, w, c); } } } } Tensor<float> * reshaped_relu3_top = Tensor<float>::TensorCPUtoGPU(reshaped_relu3_top_cpu); // flatten the tensor Tensor<float>::ReshapeTensorGPU(reshaped_relu3_top, to_fc4_dims); fc4.Forward({reshaped_relu3_top}, {fc4_top}); float fc4_t = stopTimer(); printf("fc4 forward: %3.1f ms \n", fc4_t); startTimer(); fc5.Forward({fc4_top}, {fc5_top}); float fc5_t = stopTimer(); printf("fc5 forward: %3.1f ms \n", fc5_t); startTimer(); softmax.Forward({fc5_top}, {sm_top}); float sm_t = stopTimer(); printf("softmax forward: %3.1f ms \n\n", sm_t); startTimer(); float total_time = conv1_t + pool1_t + relu1_t + conv2_t + pool2_t + relu2_t + conv3_t + pool3_t + relu3_t + fc4_t + fc5_t + sm_t; printf("Total forward time: %3.1f ms\n\n", total_time); printf("Prediction: \n"); Tensor<float>* out = Tensor<float>::TensorGPUtoCPU(sm_top); for (int b = 0; b < out->GetDims()[0]; b++) { for (int h = 0; h < out->GetDims()[1]; h++) { for (int w = 0; w < out->GetDims()[2]; w++) { for (int c = 0; c < out->GetDims()[3]; c++) { if (c == 0) { printf("Airplane "); } else if (c == 1) { printf("Automobile "); } else if (c == 2) { printf("Bird "); } else if (c == 3) { printf("Cat "); } else if (c == 4) { printf("Deer "); } else if (c == 5) { printf("Dog "); } else if (c == 6) { printf("Frog "); } else if (c == 7) { printf("Horse "); } else if (c == 8) { printf("Ship "); } else if (c == 9) { printf("Truck "); } printf("probability: %1.4f \n", out->at(b,h,w,c)); } } } } } int main() { demo_cifar10_gpu(); }
4855a23f7428d8dc4502813d2fbcd0f0bde2e272.cu
#include <stdio.h> #include <assert.h> #include "basics/tensor.cu" #include "basics/session.hpp" #include "layers/data.cu" #include "layers/softmax.cu" #include "layers/cross_entropy_loss.cu" #include "layers/pooling.cu" #include "layers/conv2d.cu" #include "layers/relu.cu" #include "layers/fc.cu" #include "utils/bitmap_image.hpp" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "utils/helper_cuda.h" #include "utils/utils.cu" #include "utils/load_model.hpp" void demo_cifar10_gpu() { printf("Start demo cifar10 on GPU\n\n"); cudaError_t cudaStatus = cudaSetDevice(0); checkCudaErrors(cudaStatus); startTimer(); Session* session = Session::GetNewSession(); session->gpu = true; session->batch_size = 1; size_t batch_size = session->batch_size; Data<float> data_layer(batch_size, "datasets/cifar10/train.txt"); // vector<size_t*> data_tops_dims; size_t data_tops_dims0[4]; size_t data_tops_dims1[4]; data_layer.GetTopsDims({}, {data_tops_dims0, data_tops_dims1}); std::vector<Tensor<float>*> data_tops; data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims0)); data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims1)); Conv2D<float> conv1(5,5,3,32,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv1_top_dims[4]; conv1.GetTopsDims({data_tops_dims0}, {conv1_top_dims}); Tensor<float> * conv1_top = Tensor<float>::CreateTensorGPU(conv1_top_dims); Pooling<float> pool1(2, MAX, 2); size_t pool1_top_dims[4]; pool1.GetTopsDims({conv1_top_dims}, {pool1_top_dims}); Tensor<float> * pool1_top = Tensor<float>::CreateTensorGPU(pool1_top_dims); Relu<float> relu1; size_t relu1_top_dims[4]; relu1.GetTopsDims({pool1_top_dims}, {relu1_top_dims}); Tensor<float> * relu1_top = Tensor<float>::CreateTensorGPU(relu1_top_dims); Conv2D<float> conv2(5,5,32,32,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv2_top_dims[4]; conv2.GetTopsDims({relu1_top_dims}, {conv2_top_dims}); Tensor<float> * conv2_top = Tensor<float>::CreateTensorGPU(conv2_top_dims); Pooling<float> pool2(2, MAX, 2); size_t pool2_top_dims[4]; pool2.GetTopsDims({conv2_top_dims}, {pool2_top_dims}); Tensor<float> * pool2_top = Tensor<float>::CreateTensorGPU(pool2_top_dims); Relu<float> relu2; size_t relu2_top_dims[4]; relu2.GetTopsDims({pool2_top_dims}, {relu2_top_dims}); Tensor<float> * relu2_top = Tensor<float>::CreateTensorGPU(relu2_top_dims); Conv2D<float> conv3(5,5,32,64,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv3_top_dims[4]; conv3.GetTopsDims({relu2_top_dims}, {conv3_top_dims}); Tensor<float> * conv3_top = Tensor<float>::CreateTensorGPU(conv3_top_dims); Pooling<float> pool3(2, MAX, 2); size_t pool3_top_dims[4]; pool3.GetTopsDims({conv3_top_dims}, {pool3_top_dims}); Tensor<float> * pool3_top = Tensor<float>::CreateTensorGPU(pool3_top_dims); Relu<float> relu3; size_t relu3_top_dims[4]; relu3.GetTopsDims({pool3_top_dims}, {relu3_top_dims}); Tensor<float> * relu3_top = Tensor<float>::CreateTensorGPU(relu3_top_dims); size_t to_fc4_dims[4]; to_fc4_dims[0] = relu3_top_dims[0]; to_fc4_dims[1] = 1; to_fc4_dims[2] = 1; to_fc4_dims[3] = relu3_top_dims[1]*relu3_top_dims[2]*relu3_top_dims[3]; FC<float> fc4(to_fc4_dims[3],64); size_t fc4_top_dims[4]; fc4.GetTopsDims({to_fc4_dims}, {fc4_top_dims}); Tensor<float> * fc4_top = Tensor<float>::CreateTensorGPU(fc4_top_dims); FC<float> fc5(64, 10); size_t fc5_top_dims[4]; fc5.GetTopsDims({fc4_top_dims}, {fc5_top_dims}); Tensor<float> * fc5_top = Tensor<float>::CreateTensorGPU(fc5_top_dims); Softmax<float> softmax; size_t sm_top_dims[4]; 
softmax.GetTopsDims({fc5_top_dims}, {sm_top_dims}); Tensor<float> * sm_top = Tensor<float>::CreateTensorGPU(sm_top_dims); printf("network finished setup: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus); printf("Loading weights ...\n"); std::string model_path = "models/cifar10/model.txt"; std::ifstream file(model_path); size_t conv1_w_dims[4] = {5,5,3,32}; Tensor<float>* conv1_w = Tensor<float>::CreateTensorCPU(conv1_w_dims); load_to_conv<float>(conv1_w, file); Tensor<float>::DataArrayCPUtoGPU(conv1_w, conv1.W_); size_t conv1_b_dims[4] = {1,1,1,32}; Tensor<float>* conv1_b = Tensor<float>::CreateTensorCPU(conv1_b_dims); load_to_bias<float>(conv1_b, file); Tensor<float>::DataArrayCPUtoGPU(conv1_b, conv1.b_); size_t conv2_w_dims[4] = {5,5,32,32}; Tensor<float>* conv2_w = Tensor<float>::CreateTensorCPU(conv2_w_dims); load_to_conv<float>(conv2_w, file); Tensor<float>::DataArrayCPUtoGPU(conv2_w, conv2.W_); size_t conv2_b_dims[4] = {1,1,1,32}; Tensor<float>* conv2_b = Tensor<float>::CreateTensorCPU(conv2_b_dims); load_to_bias<float>(conv2_b, file); Tensor<float>::DataArrayCPUtoGPU(conv2_b, conv2.b_); size_t conv3_w_dims[4] = {5,5,32,64}; Tensor<float>* conv3_w = Tensor<float>::CreateTensorCPU(conv3_w_dims); load_to_conv<float>(conv3_w, file); Tensor<float>::DataArrayCPUtoGPU(conv3_w, conv3.W_); size_t conv3_b_dims[4] = {1,1,1,64}; Tensor<float>* conv3_b = Tensor<float>::CreateTensorCPU(conv3_b_dims); load_to_bias<float>(conv3_b, file); Tensor<float>::DataArrayCPUtoGPU(conv3_b, conv3.b_); size_t fc4_w_dims[4] = {1,1,64,1024}; Tensor<float>* fc4_w = Tensor<float>::CreateTensorCPU(fc4_w_dims); load_to_fc<float>(fc4_w, file); Tensor<float>::DataArrayCPUtoGPU(fc4_w, fc4.W_); size_t fc4_b_dims[4] = {1,1,1,64}; Tensor<float>* fc4_b = Tensor<float>::CreateTensorCPU(fc4_b_dims); load_to_bias<float>(fc4_b, file); Tensor<float>::DataArrayCPUtoGPU(fc4_b, fc4.b_); size_t fc5_w_dims[4] = {1,1,10,64}; Tensor<float>* fc5_w = Tensor<float>::CreateTensorCPU(fc5_w_dims); load_to_fc<float>(fc5_w, file); Tensor<float>::DataArrayCPUtoGPU(fc5_w, fc5.W_); size_t fc5_b_dims[4] = {1,1,1,10}; Tensor<float>* fc5_b = Tensor<float>::CreateTensorCPU(fc5_b_dims); load_to_bias<float>(fc5_b, file); Tensor<float>::DataArrayCPUtoGPU(fc5_b, fc5.b_); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); float data_t = stopTimer(); printf("data forward: %3.1f ms \n", data_t); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); float conv1_t = stopTimer(); printf("conv1 forward: %3.1f ms \n", conv1_t); startTimer(); pool1.Forward({conv1_top}, {pool1_top}); float pool1_t = stopTimer(); printf("pool1 forward: %3.1f ms \n", pool1_t); startTimer(); relu1.Forward({pool1_top}, {relu1_top}); float relu1_t = stopTimer(); printf("relu1 forward: %3.1f ms \n", relu1_t); startTimer(); conv2.Forward({relu1_top}, {conv2_top}); float conv2_t = stopTimer(); printf("conv2 forward: %3.1f ms \n", conv2_t); startTimer(); pool2.Forward({conv2_top}, {pool2_top}); float pool2_t = stopTimer(); printf("pool2 forward: %3.1f ms \n", pool2_t); startTimer(); relu2.Forward({pool2_top}, {relu2_top}); float relu2_t = stopTimer(); printf("relu2 forward: %3.1f ms \n", relu2_t); startTimer(); conv3.Forward({relu2_top}, {conv3_top}); float conv3_t = stopTimer(); printf("conv3 forward: %3.1f ms \n", conv3_t); startTimer(); pool3.Forward({conv3_top}, {pool3_top}); float pool3_t = stopTimer(); printf("pool3 forward: %3.1f ms \n", pool3_t); startTimer(); relu3.Forward({pool3_top}, 
{relu3_top}); float relu3_t = stopTimer(); printf("relu3 forward: %3.1f ms \n", relu3_t); startTimer(); // flatten the tensor size_t relu3_top_dims_reshaped[4] = {relu3_top_dims[0], relu3_top_dims[3], relu3_top_dims[1], relu3_top_dims[2]}; Tensor<float> * reshaped_relu3_top_cpu = Tensor<float>::CreateTensorCPU(relu3_top_dims_reshaped); Tensor<float> * relu3_top_cpu = Tensor<float>::TensorGPUtoCPU(relu3_top); for(int b = 0; b < relu3_top_dims_reshaped[0]; b++) { for(int c = 0; c < relu3_top_dims_reshaped[1]; c++) { for(int h = 0; h < relu3_top_dims_reshaped[2]; h++) { for(int w = 0; w < relu3_top_dims_reshaped[3]; w++) { reshaped_relu3_top_cpu->at(b, c, h, w) = relu3_top_cpu->at(b, h, w, c); } } } } Tensor<float> * reshaped_relu3_top = Tensor<float>::TensorCPUtoGPU(reshaped_relu3_top_cpu); // flatten the tensor Tensor<float>::ReshapeTensorGPU(reshaped_relu3_top, to_fc4_dims); fc4.Forward({reshaped_relu3_top}, {fc4_top}); float fc4_t = stopTimer(); printf("fc4 forward: %3.1f ms \n", fc4_t); startTimer(); fc5.Forward({fc4_top}, {fc5_top}); float fc5_t = stopTimer(); printf("fc5 forward: %3.1f ms \n", fc5_t); startTimer(); softmax.Forward({fc5_top}, {sm_top}); float sm_t = stopTimer(); printf("softmax forward: %3.1f ms \n\n", sm_t); startTimer(); float total_time = conv1_t + pool1_t + relu1_t + conv2_t + pool2_t + relu2_t + conv3_t + pool3_t + relu3_t + fc4_t + fc5_t + sm_t; printf("Total forward time: %3.1f ms\n\n", total_time); printf("Prediction: \n"); Tensor<float>* out = Tensor<float>::TensorGPUtoCPU(sm_top); for (int b = 0; b < out->GetDims()[0]; b++) { for (int h = 0; h < out->GetDims()[1]; h++) { for (int w = 0; w < out->GetDims()[2]; w++) { for (int c = 0; c < out->GetDims()[3]; c++) { if (c == 0) { printf("Airplane "); } else if (c == 1) { printf("Automobile "); } else if (c == 2) { printf("Bird "); } else if (c == 3) { printf("Cat "); } else if (c == 4) { printf("Deer "); } else if (c == 5) { printf("Dog "); } else if (c == 6) { printf("Frog "); } else if (c == 7) { printf("Horse "); } else if (c == 8) { printf("Ship "); } else if (c == 9) { printf("Truck "); } printf("probability: %1.4f \n", out->at(b,h,w,c)); } } } } } int main() { demo_cifar10_gpu(); }
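The "flatten the tensor" step in the demo above copies the relu3 output from its (batch, height, width, channel) storage order into (batch, channel, height, width) order on the CPU before reshaping it to (N, 1, 1, C*H*W) for fc4, presumably because the fc4 weights in model.txt expect the features in channel-major order. Assuming the reshape keeps row-major (c, h, w) order, the element at (b, h, w, c) ends up at flattened feature index c*H*W + h*W + w of example b; the small helper below illustrates that mapping and is not part of the demo code.

#include <cstddef>

// Illustration of the NHWC -> NCHW flatten used before fc4 (not part of the demo).
inline size_t flattened_feature_index(size_t h, size_t w, size_t c, size_t H, size_t W)
{
    return c * H * W + h * W + w;
}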
346216571ceeb34cfe29443b697d80bc3efc76c6.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
346216571ceeb34cfe29443b697d80bc3efc76c6.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
cbff395fbc972decc6e9cb2d58323423fc52b04d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <openacc.h> #define IPMACC_MAX1(A) (A) #define IPMACC_MAX2(A,B) (A>B?A:B) #define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B)) #ifdef __cplusplus #include "openacc_container.h" #endif #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include "../../common/mgbenchUtilFunctions.h" #define SIZE 128 #define SIZE_2 SIZE / 2 #define GPU_DEVICE 0 #define ERROR_THRESHOLD 0.05 #define default_v 100000.00 typedef struct point { int x; int y; }point; typedef struct sel_points { int position; float value; }sel_points; __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_0(point * pivots); __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_1(sel_points * selected_cpu,sel_points * selected_gpu,point * the_points); void init(point *pivots, point *the_points, sel_points *selected_cpu, sel_points *selected_gpu) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation pivots\n"); acc_present_or_create((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin pivots\n"); acc_pcopyin((void*)pivots,(63+0)*sizeof(point )); { /* kernel call statement [0, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_0), dim3((((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (point *)acc_deviceptr((void*)pivots)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout pivots\n"); acc_copyout_and_keep((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation the_points\n"); acc_present_or_create((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation selected_cpu\n"); acc_present_or_create((void*)selected_cpu,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory allocation selected_gpu\n"); acc_present_or_create((void*)selected_gpu,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin the_points\n"); acc_pcopyin((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin selected_cpu\n"); acc_pcopyin((void*)selected_cpu,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin selected_gpu\n"); acc_pcopyin((void*)selected_gpu,(16383+0)*sizeof(sel_points )); { /* kernel call statement [1, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_1), dim3((((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (sel_points *)acc_deviceptr((void*)selected_cpu), (sel_points *)acc_deviceptr((void*)selected_gpu), (point *)acc_deviceptr((void*)the_points)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout the_points\n"); acc_copyout_and_keep((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout selected_cpu\n"); acc_copyout_and_keep((void*)selected_cpu,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyout selected_gpu\n"); acc_copyout_and_keep((void*)selected_gpu,(16383+0)*sizeof(sel_points )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_2(point * pivots,sel_points * selected,point * the_points); __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_3(sel_points * selected); void k_nearest_gpu(point *pivots, point *the_points, sel_points *selected) { int i, j, m; ipmacc_prompt((char*)"IPMACC: memory allocation pivots\n"); acc_present_or_create((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation the_points\n"); acc_present_or_create((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation selected\n"); acc_present_or_create((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin pivots\n"); acc_pcopyin((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin the_points\n"); acc_pcopyin((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin selected\n"); acc_pcopyin((void*)selected,(8191+0)*sizeof(sel_points )); { /* kernel call statement [2, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 2 > gridDim: %d\tblockDim: 
%d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_2), dim3((((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (point *)acc_deviceptr((void*)pivots), (sel_points *)acc_deviceptr((void*)selected), (point *)acc_deviceptr((void*)the_points)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout pivots\n"); acc_copyout_and_keep((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout the_points\n"); acc_copyout_and_keep((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout selected\n"); acc_copyout_and_keep((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory allocation selected\n"); acc_present_or_create((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin selected\n"); acc_pcopyin((void*)selected,(8191+0)*sizeof(sel_points )); { /* kernel call statement [3, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 3 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_3), dim3((((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (sel_points *)acc_deviceptr((void*)selected)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout selected\n"); acc_copyout_and_keep((void*)selected,(8191+0)*sizeof(sel_points )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_4(point * pivots,sel_points * selected,point * the_points); __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_5(sel_points * selected); void k_nearest_cpu(point *pivots, point *the_points, sel_points *selected) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation pivots\n"); acc_present_or_create((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation the_points\n"); acc_present_or_create((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation selected\n"); acc_present_or_create((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin pivots\n"); acc_pcopyin((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin the_points\n"); acc_pcopyin((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin selected\n"); acc_pcopyin((void*)selected,(8191+0)*sizeof(sel_points )); { /* kernel call statement [4, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 4 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_4), dim3((((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (point *)acc_deviceptr((void*)pivots), (sel_points *)acc_deviceptr((void*)selected), (point *)acc_deviceptr((void*)the_points)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout pivots\n"); acc_copyout_and_keep((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout the_points\n"); acc_copyout_and_keep((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout selected\n"); acc_copyout_and_keep((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory allocation selected\n"); acc_present_or_create((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin selected\n"); acc_pcopyin((void*)selected,(8191+0)*sizeof(sel_points )); { /* kernel call statement [5, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 5 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_5), dim3((((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (sel_points *)acc_deviceptr((void*)selected)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout selected\n"); acc_copyout_and_keep((void*)selected,(8191+0)*sizeof(sel_points )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_6(sel_points * B,sel_points * B_GPU,int fail); void compareResults(sel_points* B, sel_points* B_GPU) { int i, j, fail; fail = 0; ipmacc_prompt((char*)"IPMACC: memory allocation B\n"); acc_present_or_create((void*)B,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory allocation B_GPU\n"); acc_present_or_create((void*)B_GPU,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin B\n"); acc_pcopyin((void*)B,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin B_GPU\n"); acc_pcopyin((void*)B_GPU,(16383+0)*sizeof(sel_points )); { /* kernel call statement [6, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 6 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_6), dim3((((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (sel_points *)acc_deviceptr((void*)B), (sel_points *)acc_deviceptr((void*)B_GPU), fail); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout B\n"); acc_copyout_and_keep((void*)B,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyout B_GPU\n"); acc_copyout_and_keep((void*)B_GPU,(16383+0)*sizeof(sel_points )); printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; point *pivots; point *the_points; sel_points *selected_cpu, *selected_gpu; fprintf(stdout, "<< K-nearest >>\n"); pivots = (point*)malloc(sizeof(point) * SIZE); the_points = (point*)malloc(sizeof(point) * SIZE); selected_cpu = (sel_points*)malloc(sizeof(sel_points) * SIZE * SIZE); selected_gpu = (sel_points*)malloc(sizeof(sel_points) * SIZE * SIZE); init(pivots, the_points, selected_cpu, selected_gpu); t_start = rtclock(); k_nearest_gpu(pivots, the_points, selected_gpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); k_nearest_cpu(pivots, the_points, selected_cpu); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(selected_cpu, selected_gpu); free(selected_cpu); free(selected_gpu); free(pivots); free(the_points); return 0; } __device__ float __accelerator_absVal( float a ) { if ( a < 0 ) { return ( a * -1) ; } else { return a ; } } __device__ float __accelerator_percentDiff( double val1 , double val2 ) { if ( ( __accelerator_absVal( val1 ) < 0.01) && ( __accelerator_absVal( val2 ) < 0.01) ) { return 0.0f ; } else { return 100.0f * ( __accelerator_absVal( __accelerator_absVal( val1 - val2 ) / __accelerator_absVal( val1 + 0.00000001f ) ) ) ; } } __global__ void __generated_kernel_region_0(point * pivots){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int 
__kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { pivots [i].x = i * 3; pivots [i].y = i * 2; } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(sel_points * selected_cpu,sel_points * selected_gpu,point * the_points){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { the_points [i].x = i * 3; the_points [i].y = i * 2; for(j = 0; j < SIZE; j++) { selected_cpu [i * SIZE + j].position = 0; selected_cpu [i * SIZE + j].value = default_v; selected_gpu [i * SIZE + j].position = 0; selected_gpu [i * SIZE + j].value = default_v; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_2(point * pivots,sel_points * selected,point * the_points){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { for(j = 0; j < SIZE; j++) { float distance, x, y; x = pivots [i].x - the_points [j].x; y = pivots [i].y - the_points [j].y; x = x * x; y = y * y; distance = x + y; distance = sqrt(distance); selected [i * SIZE + j].value = distance; selected [i * SIZE + j].position = j; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_3(sel_points * selected){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; int m; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { for(j = 0; j < SIZE; j++) { for(m = j + 1; m < SIZE; m++) { if (selected [i * SIZE + j].value > selected [i * SIZE + m].value) { sel_points aux; aux = selected [i * SIZE + j]; selected [i * SIZE + j] = selected [i * SIZE + m]; selected [i * SIZE + m] = aux; } } } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_4(point * pivots,sel_points * selected,point * the_points){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { for(j = 0; j < SIZE; j++) { float distance, x, y; x = pivots [i].x - the_points [j].x; y = pivots [i].y - the_points [j].y; x = x * x; y = y * y; distance = x + y; distance = sqrt(distance); selected [i * SIZE + j].value = distance; selected [i * SIZE + j].position = j; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_5(sel_points * selected){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { int j; for(j = 0; j < SIZE; j++) { int m; for(m = j + 1; m < SIZE; m++) { if (selected [i * SIZE + j].value > selected [i * SIZE + m].value) { sel_points aux; aux = selected [i * SIZE + j]; selected [i * SIZE + j] = selected [i * SIZE + m]; selected [i * SIZE + m] = aux; } } } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_6(sel_points * B,sel_points * B_GPU,int fail){ int 
__kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; j++) { if (__accelerator_percentDiff(B [i * SIZE + j].value, B_GPU [i * SIZE + j].value) > ERROR_THRESHOLD) { fail++; } if (__accelerator_percentDiff(B [i * SIZE + j].position, B_GPU [i * SIZE + j].position) > ERROR_THRESHOLD) { fail++; } } } } } } //append writeback of scalar variables }
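// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated file above or the file below):
// the IPMACC launch wrappers above only test the result of the post-launch
// hipDeviceSynchronize/cudaDeviceSynchronize call. Checking the launch itself
// as well reports invalid grid/block configurations at the point of the call.
// Shown with the CUDA runtime API; the hipified file above would use
// hipGetLastError / hipDeviceSynchronize in the same way. The helper name is
// hypothetical.
#include <cstdio>
#include <cassert>
#include <cuda_runtime.h>

static inline void check_kernel_launch(const char *what)
{
    cudaError_t err = cudaGetLastError();           // error from the launch itself
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();              // errors raised while the kernel ran
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        assert(0 && "Launch Failure!");
    }
}
// ---------------------------------------------------------------------------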
cbff395fbc972decc6e9cb2d58323423fc52b04d.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <openacc.h> #define IPMACC_MAX1(A) (A) #define IPMACC_MAX2(A,B) (A>B?A:B) #define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B)) #ifdef __cplusplus #include "openacc_container.h" #endif #include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include "../../common/mgbenchUtilFunctions.h" #define SIZE 128 #define SIZE_2 SIZE / 2 #define GPU_DEVICE 0 #define ERROR_THRESHOLD 0.05 #define default_v 100000.00 typedef struct point { int x; int y; }point; typedef struct sel_points { int position; float value; }sel_points; __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_0(point * pivots); __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_1(sel_points * selected_cpu,sel_points * selected_gpu,point * the_points); void init(point *pivots, point *the_points, sel_points *selected_cpu, sel_points *selected_gpu) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation pivots\n"); acc_present_or_create((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin pivots\n"); acc_pcopyin((void*)pivots,(63+0)*sizeof(point )); { /* kernel call statement [0, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_0<<<(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (point *)acc_deviceptr((void*)pivots)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout pivots\n"); acc_copyout_and_keep((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation the_points\n"); acc_present_or_create((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation selected_cpu\n"); acc_present_or_create((void*)selected_cpu,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory allocation selected_gpu\n"); acc_present_or_create((void*)selected_gpu,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin the_points\n"); acc_pcopyin((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin selected_cpu\n"); acc_pcopyin((void*)selected_cpu,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin selected_gpu\n"); acc_pcopyin((void*)selected_gpu,(16383+0)*sizeof(sel_points )); { /* kernel call statement [1, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_1<<<(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (sel_points *)acc_deviceptr((void*)selected_cpu), (sel_points *)acc_deviceptr((void*)selected_gpu), (point *)acc_deviceptr((void*)the_points)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout the_points\n"); acc_copyout_and_keep((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout selected_cpu\n"); acc_copyout_and_keep((void*)selected_cpu,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyout selected_gpu\n"); acc_copyout_and_keep((void*)selected_gpu,(16383+0)*sizeof(sel_points )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_2(point * pivots,sel_points * selected,point * the_points); __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_3(sel_points * selected); void k_nearest_gpu(point *pivots, point *the_points, sel_points *selected) { int i, j, m; ipmacc_prompt((char*)"IPMACC: memory allocation pivots\n"); acc_present_or_create((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation the_points\n"); acc_present_or_create((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation selected\n"); acc_present_or_create((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin pivots\n"); acc_pcopyin((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin the_points\n"); acc_pcopyin((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin selected\n"); acc_pcopyin((void*)selected,(8191+0)*sizeof(sel_points )); { /* kernel call statement [2, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 2 > gridDim: %d\tblockDim: 
%d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_2<<<(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (point *)acc_deviceptr((void*)pivots), (sel_points *)acc_deviceptr((void*)selected), (point *)acc_deviceptr((void*)the_points)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout pivots\n"); acc_copyout_and_keep((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout the_points\n"); acc_copyout_and_keep((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout selected\n"); acc_copyout_and_keep((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory allocation selected\n"); acc_present_or_create((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin selected\n"); acc_pcopyin((void*)selected,(8191+0)*sizeof(sel_points )); { /* kernel call statement [3, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 3 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_3<<<(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (sel_points *)acc_deviceptr((void*)selected)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout selected\n"); acc_copyout_and_keep((void*)selected,(8191+0)*sizeof(sel_points )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_4(point * pivots,sel_points * selected,point * the_points); __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_5(sel_points * selected); void k_nearest_cpu(point *pivots, point *the_points, sel_points *selected) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation pivots\n"); acc_present_or_create((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation the_points\n"); acc_present_or_create((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory allocation selected\n"); acc_present_or_create((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin pivots\n"); acc_pcopyin((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin the_points\n"); acc_pcopyin((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyin selected\n"); acc_pcopyin((void*)selected,(8191+0)*sizeof(sel_points )); { /* kernel call statement [4, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 4 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_4<<<(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (point *)acc_deviceptr((void*)pivots), (sel_points *)acc_deviceptr((void*)selected), (point *)acc_deviceptr((void*)the_points)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout pivots\n"); acc_copyout_and_keep((void*)pivots,(63+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout the_points\n"); acc_copyout_and_keep((void*)the_points,(127+0)*sizeof(point )); ipmacc_prompt((char*)"IPMACC: memory copyout selected\n"); acc_copyout_and_keep((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory allocation selected\n"); acc_present_or_create((void*)selected,(8191+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin selected\n"); acc_pcopyin((void*)selected,(8191+0)*sizeof(sel_points )); { /* kernel call statement [5, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 5 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_5<<<(((abs((int)((SIZE_2))-(0+0)))/(1)))/256+(((((abs((int)((SIZE_2))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (sel_points *)acc_deviceptr((void*)selected)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout selected\n"); acc_copyout_and_keep((void*)selected,(8191+0)*sizeof(sel_points )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_6(sel_points * B,sel_points * B_GPU,int fail); void compareResults(sel_points* B, sel_points* B_GPU) { int i, j, fail; fail = 0; ipmacc_prompt((char*)"IPMACC: memory allocation B\n"); acc_present_or_create((void*)B,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory allocation B_GPU\n"); acc_present_or_create((void*)B_GPU,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin B\n"); acc_pcopyin((void*)B,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyin B_GPU\n"); acc_pcopyin((void*)B_GPU,(16383+0)*sizeof(sel_points )); { /* kernel call statement [6, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 6 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_6<<<(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (sel_points *)acc_deviceptr((void*)B), (sel_points *)acc_deviceptr((void*)B_GPU), fail); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout B\n"); acc_copyout_and_keep((void*)B,(16383+0)*sizeof(sel_points )); ipmacc_prompt((char*)"IPMACC: memory copyout B_GPU\n"); acc_copyout_and_keep((void*)B_GPU,(16383+0)*sizeof(sel_points )); printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; point *pivots; point *the_points; sel_points *selected_cpu, *selected_gpu; fprintf(stdout, "<< K-nearest >>\n"); pivots = (point*)malloc(sizeof(point) * SIZE); the_points = (point*)malloc(sizeof(point) * SIZE); selected_cpu = (sel_points*)malloc(sizeof(sel_points) * SIZE * SIZE); selected_gpu = (sel_points*)malloc(sizeof(sel_points) * SIZE * SIZE); init(pivots, the_points, selected_cpu, selected_gpu); t_start = rtclock(); k_nearest_gpu(pivots, the_points, selected_gpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); k_nearest_cpu(pivots, the_points, selected_cpu); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(selected_cpu, selected_gpu); free(selected_cpu); free(selected_gpu); free(pivots); free(the_points); return 0; } __device__ float __accelerator_absVal( float a ) { if ( a < 0 ) { return ( a * -1) ; } else { return a ; } } __device__ float __accelerator_percentDiff( double val1 , double val2 ) { if ( ( __accelerator_absVal( val1 ) < 0.01) && ( __accelerator_absVal( val2 ) < 0.01) ) { return 0.0f ; } else { return 100.0f * ( __accelerator_absVal( __accelerator_absVal( val1 - val2 ) / __accelerator_absVal( val1 + 0.00000001f ) ) ) ; } } __global__ void __generated_kernel_region_0(point * pivots){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int 
__kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { pivots [i].x = i * 3; pivots [i].y = i * 2; } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(sel_points * selected_cpu,sel_points * selected_gpu,point * the_points){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { the_points [i].x = i * 3; the_points [i].y = i * 2; for(j = 0; j < SIZE; j++) { selected_cpu [i * SIZE + j].position = 0; selected_cpu [i * SIZE + j].value = default_v; selected_gpu [i * SIZE + j].position = 0; selected_gpu [i * SIZE + j].value = default_v; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_2(point * pivots,sel_points * selected,point * the_points){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { for(j = 0; j < SIZE; j++) { float distance, x, y; x = pivots [i].x - the_points [j].x; y = pivots [i].y - the_points [j].y; x = x * x; y = y * y; distance = x + y; distance = sqrt(distance); selected [i * SIZE + j].value = distance; selected [i * SIZE + j].position = j; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_3(sel_points * selected){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; int m; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { for(j = 0; j < SIZE; j++) { for(m = j + 1; m < SIZE; m++) { if (selected [i * SIZE + j].value > selected [i * SIZE + m].value) { sel_points aux; aux = selected [i * SIZE + j]; selected [i * SIZE + j] = selected [i * SIZE + m]; selected [i * SIZE + m] = aux; } } } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_4(point * pivots,sel_points * selected,point * the_points){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { for(j = 0; j < SIZE; j++) { float distance, x, y; x = pivots [i].x - the_points [j].x; y = pivots [i].y - the_points [j].y; x = x * x; y = y * y; distance = x + y; distance = sqrt(distance); selected [i * SIZE + j].value = distance; selected [i * SIZE + j].position = j; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_5(sel_points * selected){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE_2) { int j; for(j = 0; j < SIZE; j++) { int m; for(m = j + 1; m < SIZE; m++) { if (selected [i * SIZE + j].value > selected [i * SIZE + m].value) { sel_points aux; aux = selected [i * SIZE + j]; selected [i * SIZE + j] = selected [i * SIZE + m]; selected [i * SIZE + m] = aux; } } } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_6(sel_points * B,sel_points * B_GPU,int fail){ int 
__kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; j++) { if (__accelerator_percentDiff(B [i * SIZE + j].value, B_GPU [i * SIZE + j].value) > ERROR_THRESHOLD) { fail++; } if (__accelerator_percentDiff(B [i * SIZE + j].position, B_GPU [i * SIZE + j].position) > ERROR_THRESHOLD) { fail++; } } } } } } //append writeback of scalar variables }
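// ---------------------------------------------------------------------------
// Editor's sketch (not part of the file above): in compareResults() the scalar
// `fail` is passed to __generated_kernel_region_6 by value, so the increments
// made on the device never reach the host and the final printf always reports
// 0 mismatches. A common way to accumulate such a count is a device-resident
// counter updated with atomicAdd and copied back after the launch. The kernel
// and variable names below are hypothetical; sel_points and
// __accelerator_percentDiff are the definitions from the file above.
__global__ void count_mismatches(const sel_points *B, const sel_points *B_GPU,
                                 int n, float threshold, int *d_fail)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        if (__accelerator_percentDiff(B[i].value, B_GPU[i].value) > threshold ||
            __accelerator_percentDiff(B[i].position, B_GPU[i].position) > threshold)
            atomicAdd(d_fail, 1);   // device-side accumulation visible to the host
    }
}
// Host side (illustrative):
//   int *d_fail, fail = 0;
//   cudaMalloc(&d_fail, sizeof(int));
//   cudaMemset(d_fail, 0, sizeof(int));
//   count_mismatches<<<(SIZE * SIZE + 255) / 256, 256>>>(d_B, d_B_GPU,
//                                                        SIZE * SIZE, ERROR_THRESHOLD, d_fail);
//   cudaMemcpy(&fail, d_fail, sizeof(int), cudaMemcpyDeviceToHost);
// ---------------------------------------------------------------------------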
cd92df5e44d41a77c54eda076ba5be2f456f11c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Adds the elements of 2 arrays with // a million elements each. #include <iostream> #include <math.h> //kernel __global__ void add(int n, float *x, float *y) { int index = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; // = 1M elements; /* --- CPU Version ---- float *x = new float[N]; float *y = new float[N]; //init x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // run add() on CPU (HOST) add(N, x, y); //Check for errors, == 3.0f? float maxError = 0.0f; for(int i = 0; i < N; i++) { maxError = fmax(maxError, fabs(y[i] - 3.0f)); } std::cout << "MAX Error: " << maxError << std::endl; // free mem delete [] x; delete [] y; return 0; */ // Allocate Unified Memory -- accessible from CPU or GPU float *x, *y; hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // init x and y on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( add), dim3(numBlocks),dim3(blockSize), 0, 0, N, x, y); //wait for GPU hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max Error: " << maxError << std::endl; // free Mem hipFree(x); hipFree(y); return 0; // hipMalloc((void **) &da, size); // hipMalloc((void **) &db, size); // hipMalloc((void **) &dc, size); // //setup input values // a = 2; // b = 7; // hipMemcpy(da, &a, size, hipMemcpyHostToDevice); // hipMemcpy(db, &b, size, hipMemcpyHostToDevice); // add<<<1, 1>>>(da, db, dc); // hipMemcpy(&c, dc, size, hipMemcpyDeviceToHost); // printf("Value of c: %d", c); // hipFree(da); // hipFree(db); // hipFree(dc); // return 0; }
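// ---------------------------------------------------------------------------
// Editor's note (sketch, not part of the hipified file above): with managed
// memory the first kernel touch triggers on-demand page migration. On devices
// that support concurrent managed access, prefetching the buffers to the GPU
// before the launch (and back to the host before the verification loop) avoids
// those faults. Shown with the CUDA runtime; the HIP build would use
// hipMemPrefetchAsync and hipCpuDeviceId the same way. x, y, N, numBlocks and
// blockSize refer to the variables in main() above.
//
//   int device = 0;
//   cudaGetDevice(&device);
//   cudaMemPrefetchAsync(x, N * sizeof(float), device, 0);   // move pages to the GPU
//   cudaMemPrefetchAsync(y, N * sizeof(float), device, 0);
//   add<<<numBlocks, blockSize>>>(N, x, y);
//   cudaMemPrefetchAsync(y, N * sizeof(float), cudaCpuDeviceId, 0); // back for the CPU check
//   cudaDeviceSynchronize();
// ---------------------------------------------------------------------------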
cd92df5e44d41a77c54eda076ba5be2f456f11c9.cu
// Adds the elements of 2 arrays with // a million elements each. #include <iostream> #include <math.h> //kernel __global__ void add(int n, float *x, float *y) { int index = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; // = 1M elements; /* --- CPU Version ---- float *x = new float[N]; float *y = new float[N]; //init x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // run add() on CPU (HOST) add(N, x, y); //Check for errors, == 3.0f? float maxError = 0.0f; for(int i = 0; i < N; i++) { maxError = fmax(maxError, fabs(y[i] - 3.0f)); } std::cout << "MAX Error: " << maxError << std::endl; // free mem delete [] x; delete [] y; return 0; */ // Allocate Unified Memory -- accessible from CPU or GPU float *x, *y; cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // init x and y on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; add<<<numBlocks,blockSize>>>(N, x, y); //wait for GPU cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max Error: " << maxError << std::endl; // free Mem cudaFree(x); cudaFree(y); return 0; // cudaMalloc((void **) &da, size); // cudaMalloc((void **) &db, size); // cudaMalloc((void **) &dc, size); // //setup input values // a = 2; // b = 7; // cudaMemcpy(da, &a, size, cudaMemcpyHostToDevice); // cudaMemcpy(db, &b, size, cudaMemcpyHostToDevice); // add<<<1, 1>>>(da, db, dc); // cudaMemcpy(&c, dc, size, cudaMemcpyDeviceToHost); // printf("Value of c: %d", c); // cudaFree(da); // cudaFree(db); // cudaFree(dc); // return 0; }
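// ---------------------------------------------------------------------------
// Editor's sketch: the commented-out fragment at the end of the file above
// hints at the explicit-copy style (cudaMalloc / cudaMemcpy) rather than
// unified memory. A minimal, self-contained version of the same vector add
// written that way could look like the following; it is an illustration, not
// part of the original sample.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

__global__ void add_explicit(int n, const float *x, float *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = x[i] + y[i];
}

int main()
{
    const int N = 1 << 20;
    const size_t bytes = N * sizeof(float);
    float *hx = new float[N], *hy = new float[N];
    for (int i = 0; i < N; i++) { hx[i] = 1.0f; hy[i] = 2.0f; }

    float *dx, *dy;
    cudaMalloc(&dx, bytes);
    cudaMalloc(&dy, bytes);
    cudaMemcpy(dx, hx, bytes, cudaMemcpyHostToDevice);    // host -> device
    cudaMemcpy(dy, hy, bytes, cudaMemcpyHostToDevice);

    add_explicit<<<(N + 255) / 256, 256>>>(N, dx, dy);
    cudaMemcpy(hy, dy, bytes, cudaMemcpyDeviceToHost);    // device -> host, synchronizes

    float maxError = 0.0f;
    for (int i = 0; i < N; i++) maxError = fmaxf(maxError, fabsf(hy[i] - 3.0f));
    printf("Max Error: %f\n", maxError);

    cudaFree(dx); cudaFree(dy);
    delete[] hx; delete[] hy;
    return 0;
}
// ---------------------------------------------------------------------------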
df6db9e3f84c4bbd3f2d6ceaa88ef7e90bc1b7aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" typedef uint8_t uint8; #define TB 128 #define DISP_MAX 256 #define BLOCK_SIZE 32 #define XDIM_MAX_THREADS 1024 #define BLOCK_D_SIZE 64 #define COLOR_DIFF(x, i, j) (abs(x[i] - x[j])) struct postparams{ float pi1; float pi2; float tau_so; float alpha1; float sgm_q1; float sgm_q2; float alpha2; float sigma; int kernel_size; }; void parseConf(postparams &params,std::string conf ){ const std::string& chars = "\t\n\v\f\r "; std::ifstream ifs(conf.c_str()); std::string line; if(ifs.is_open()){ while(std::getline(ifs,line )){ std::string opt = line.substr(0,line.find_last_of(":")); opt.erase(0, opt.find_first_not_of(chars)); opt.erase(opt.find_last_not_of(chars) + 1); int start = line.find_last_of(":")+1; int end = line.find_first_of("#") - start; std::string val = line.substr(start,end); val.erase(0, val.find_first_not_of(chars)); val.erase(val.find_last_not_of(chars) + 1); if(!strcmp(opt.c_str(),"pi1")){ params.pi1 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"pi2")){ params.pi2 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"tau_so")){ params.tau_so = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"alpha1")){ params.alpha1 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"sgm_q1")){ params.sgm_q1 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"sgm_q2")){ params.sgm_q2 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"alpha2")){ params.alpha2 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"sigma")){ params.sigma = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"kernel_size")){ params.kernel_size = atoi(val.c_str()); } } }else{ std::cout << "File " << conf << " does not exist! " <<std::endl; exit(0); } } std::vector<std::string> getImages(std::string file){ std::vector<std::string> imageNames; std::ifstream ifs(file.c_str()); std::string line; if(ifs.is_open()){ while(std::getline(ifs,line )){ imageNames.push_back(line); } }else{ std::cout << "File " << file << " does not exist! 
" <<std::endl; exit(0); } return imageNames; } template<typename T> __global__ void argmin( float* disp_d, T* cost, int rows, int cols, int ndisp ){ int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x; if( Row < rows && Col < cols){ T mincost=cost[ Row*cols*ndisp+Col*ndisp ]; int d=0; for(int i=1; i<ndisp; i++){ float cd = cost[ Row*cols*ndisp+Col*ndisp +i ]; if( cd < mincost ){ mincost = cd; d = i; } } disp_d[ Row*cols+Col ] = (float)d; } } template<typename T> __global__ void argmin_d( float* disp_d, T* cost, int rows, int cols, int ndisp ){ int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x; if( Row < rows && Col < cols){ T mincost=cost[ Row*cols+Col ]; int d=0; for(int i=1; i<ndisp; i++){ float cd = cost[ i*rows*cols+Row*cols+Col ]; if( cd < mincost ){ mincost = cd; d = i; } } disp_d[ Row*cols+Col ] = (float)d; } } template<typename T> __global__ void swap_axis(const T* __restrict__ cost, T* temp_cost, const int rows, const int cols, const int ndisp ){ int Col = blockIdx.x*blockDim.x + threadIdx.x; int Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y; __shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1]; if( Col< cols*rows){ #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if(Row+d < ndisp) tile[threadIdx.y+d][threadIdx.x] = cost [(Row+d)*rows*cols+Col ]; } } __syncthreads(); Col = blockIdx.x*blockDim.x+threadIdx.y; Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x; #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if((Col+d) < cols*rows && Row<ndisp) temp_cost[ (Col+d)*ndisp+Row ] = tile[threadIdx.x][threadIdx.y+d]; } } template<typename T> __global__ void swap_axis_back(const T* __restrict__ cost, T* temp_cost, const int rows, const int cols, const int ndisp ){ int Col = blockIdx.x*blockDim.x + threadIdx.y; int Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x; __shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1]; if( Col< cols*rows){ #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ tile[threadIdx.y+d][threadIdx.x] = cost [(Col+d)*ndisp+Row ]; } } __syncthreads(); Col = blockIdx.x*blockDim.x + threadIdx.x; Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y; #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if((Col+d) < cols*rows) temp_cost[ (Row+d)*rows*cols+Col ] = tile[threadIdx.x][threadIdx.y+d]; } } template<typename T> __global__ void transpose(const T* __restrict__ cost, T* temp_cost, const int dim1, const int dim2){ int Col = blockIdx.x*blockDim.x + threadIdx.x; int Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y; int disp = blockIdx.z*dim1*dim2; __shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1]; if( Col< dim2){ #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if((Row+d)<dim1) tile[threadIdx.y+d][threadIdx.x] = cost [disp+(Row+d)*dim2+Col ]; } } __syncthreads(); Col = blockIdx.x*blockDim.x+threadIdx.y; Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x; #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if((Col+d) < dim2 && Row < dim1) temp_cost[disp+(Col+d)*dim1+Row ] = tile[threadIdx.x][threadIdx.y+d]; } } template<typename T> __global__ void VerticalIntegralKernel(T* output, const int rows , const int cols , const int ndisp,const int offset){ extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[]; T* slice_sm = reinterpret_cast<T *>(shared_mem); int Row = threadIdx.x+offset; int Col = blockIdx.y; int disp = blockIdx.z; T val=0,temp=0,temp1=0; if( threadIdx.x < rows){ val = output[disp*rows*cols+Row*cols+Col]; for(int i=1; i<32; i<<=1 ){ temp = __shfl_up(val,i); if( (threadIdx.x & 31) >=i ) val +=temp; } if( 
(threadIdx.x & 31) ==31 || threadIdx.x==(rows-1) ) slice_sm[threadIdx.x/32] = val; } __syncthreads(); temp=0; if( threadIdx.x < 32 ){ temp = slice_sm[threadIdx.x]; for(int i=1; i<32; i<<=1){ temp1 = __shfl_up(temp,i); if( (threadIdx.x & 31) >=i ) temp += temp1; } slice_sm[threadIdx.x] = temp; } __syncthreads(); if( Row < rows){ if(threadIdx.x >=32) val += slice_sm[threadIdx.x/32-1]; output[disp*rows*cols+Row*cols+Col] = val; } } // This kernel has to be converted to the inplace integral kernel. Profiling though shows that this is not a bottleck for the method. template<typename T,typename I> __global__ void HorizontalIntegralKernel_outofplace(T* integral_vol,const I* input, const int integrrows , const int integrcols , const int ndisp,const int offset){ extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[]; T* slice_sm = reinterpret_cast<T *>(shared_mem); int Col = threadIdx.x+offset; int Row = blockIdx.x; int disp = blockIdx.z; if( Col < integrcols && disp < ndisp){ slice_sm[threadIdx.x] = input[disp*integrrows*integrcols+Row*integrcols+Col]; } if(offset>0 & threadIdx.x==0){ slice_sm[threadIdx.x] = integral_vol[disp*integrrows*integrcols+Row*integrcols+Col]; } T sum; for(int stride=1; stride< blockDim.x; stride*=2){ __syncthreads(); if((int)threadIdx.x-stride>=0 && Col < integrcols && disp < ndisp ) sum = slice_sm[threadIdx.x] + slice_sm[threadIdx.x-stride]; __syncthreads(); if((int)threadIdx.x-stride>=0 && Col < integrcols && disp < ndisp ) slice_sm[threadIdx.x] = sum; } if( Col<integrcols && disp < ndisp){ integral_vol[disp*integrrows*integrcols+Row*integrcols+Col] = slice_sm[threadIdx.x]; } } template<typename T> __global__ void IntegralKernel(T* output, const int dim1 , const int dim2 , const int ndisp,const int offset){ extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[]; T* slice_sm = reinterpret_cast<T *>(shared_mem); int Col = threadIdx.x+offset; int Row = blockIdx.y; int disp = blockIdx.z; T val=0,temp=0,temp1=0; if( Col < dim2){ val = output[disp*dim1*dim2+Row*dim2+Col]; for(int i=1; i<32; i<<=1 ){ temp = __shfl_up(val,i); if( (threadIdx.x & 31) >=i ) val +=temp; } if( (threadIdx.x & 31) ==31 || Col==(dim2-1) ) slice_sm[threadIdx.x/32] = val; } __syncthreads(); temp=0; if( threadIdx.x < 32 ){ temp = slice_sm[threadIdx.x]; for(int i=1; i<32; i<<=1){ temp1 = __shfl_up(temp,i); if( (threadIdx.x & 31) >=i ) temp += temp1; } slice_sm[threadIdx.x] = temp; } __syncthreads(); if( Col < dim2){ if(threadIdx.x >=32) val += slice_sm[threadIdx.x/32-1]; output[disp*dim1*dim2+Row*dim2+Col] = val; } } __device__ void sort(float *x, int n) { for (int i = 0; i < n - 1; i++) { int min = i; for (int j = i + 1; j < n; j++) { if (x[j] < x[min]) { min = j; } } float tmp = x[min]; x[min] = x[i]; x[i] = tmp; } } #define INDEX_D(dim0, dim1, dim2, dim3) \ assert((dim1) >= 0 && (dim1) < size1 && (dim2) >= 0 && (dim2) < size2 && (dim3) >= 0 && (dim3) < size3), \ ((((dim0) * size3 + (dim3)) * size1 + (dim1)) * size2 + dim2) #define INDEX(dim0, dim1, dim2, dim3) \ assert((dim1) >= 0 && (dim1) < size1 && (dim2) >= 0 && (dim2) < size2 && (dim3) >= 0 && (dim3) < size3), \ ((((dim0) * size1 + (dim1)) * size2 + (dim2)) * size3 + dim3) template <int sgm_direction,typename T> __global__ void sgm_loop(float *x0, float *x1, T *input, T *output, float *tmp, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step) { int x, y, dx, dy; int d = threadIdx.x; if (sgm_direction == 0) { /* right */ x = step; //step; y = 
blockIdx.x; dx = 1; dy = 0; } else if (sgm_direction == 1) { /* left */ x = size2 - 1 - step; //step; y = blockIdx.x; dx = -1; dy = 0; } else if (sgm_direction == 2) { /* down */ x = blockIdx.x; y = step;//step; dx = 0; dy = 1; } else if (sgm_direction == 3) { /* up */ x = blockIdx.x; y = size1 - 1 - step; //step; dx = 0; dy = -1; } if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) { float val = input[INDEX(0, y, x, d)]; output[INDEX(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; return; } extern __shared__ float sgm_shared[]; float * output_s = &sgm_shared[0]; float * output_min= &sgm_shared[size3]; output_s[d] = output_min[d] = tmp[d * size2 + blockIdx.x]; __syncthreads(); for (int i = 256; i > 0; i /= 2) { if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) { output_min[d] = output_min[d + i]; } __syncthreads(); } int ind2 = y * size2 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx); float D2; int xx = x + d * direction; if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) { D2 = 10; } else { D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx); } float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = pi2; } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = pi2 / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = pi2 / sgm_q1; } float cost = min(output_s[d], output_min[0] + P2); if (d - 1 >= 0) { cost = min(cost, output_s[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d + 1 < size3) { cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1)); } float val = (input[INDEX(0, y, x, d)] + cost - output_min[0]); output[INDEX(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; } template <int sgm_direction> __global__ void sgm2(uint8 *x0, uint8 *x1, float *input, float *output, float *tmp, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step) { int x, y, dx, dy; int d = threadIdx.x; if (sgm_direction == 0) { /* right */ x = blockIdx.y; //step; y = blockIdx.x; dx = 1; dy = 0; } else if (sgm_direction == 1) { /* left */ x = size2 - 1 - blockIdx.y; //step; y = blockIdx.x; dx = -1; dy = 0; } else if (sgm_direction == 2) { /* down */ x = blockIdx.x; y = blockIdx.y;//step; dx = 0; dy = 1; } else if (sgm_direction == 3) { /* up */ x = blockIdx.x; y = size1 - 1 - blockIdx.y; //step; dx = 0; dy = -1; } if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) { float val = input[INDEX(0, y, x, d)]; output[INDEX_D(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; return; } __shared__ double output_s[400], output_min[400]; output_s[d] = output_min[d] = tmp[d * size2 + blockIdx.x]; __syncthreads(); for (int i = 256; i > 0; i /= 2) { if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) { output_min[d] = output_min[d + i]; } __syncthreads(); } int ind2 = y * size2 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx); float D2; int xx = x + d * direction; if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) { D2 = 10; } else { D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx); } float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = pi2; } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = pi2 / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = pi2 / sgm_q1; } float cost = min(output_s[d], output_min[0] + P2); if (d - 1 >= 0) { cost = min(cost, output_s[d - 1] + 
(sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d + 1 < size3) { cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1)); } float val = (input[INDEX(0, y, x, d)] + cost - output_min[0])*.25; output[INDEX_D(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; } __global__ void cross(float *x0, float *out, int size, int dim2, int dim3, int L1, float tau1) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dir = id; int x = dir % dim3; dir /= dim3; int y = dir % dim2; dir /= dim2; int dx = 0; int dy = 0; if (dir == 0) { dx = -1; } else if (dir == 1) { dx = 1; } else if (dir == 2) { dy = -1; } else if (dir == 3) { dy = 1; } else { assert(0); } int xx, yy, ind1, ind2, dist; ind1 = y * dim3 + x; for (xx = x + dx, yy = y + dy;;xx += dx, yy += dy) { if (xx < 0 || xx >= dim3 || yy < 0 || yy >= dim2) break; dist = max(abs(xx - x), abs(yy - y)); if (dist == 1) continue; ind2 = yy * dim3 + xx; /* rule 1 */ if (COLOR_DIFF(x0, ind1, ind2) >= tau1) break; /* rule 2 */ if (dist >= L1) break; } out[id] = dir <= 1 ? xx : yy; } } template<typename T> __global__ void cbca(float *x0c, float *x1c, T *vol, T *out, int size, int dim2, int dim3, int direction) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % dim3; d /= dim3; int y = d % dim2; d /= dim2; if (x + d * direction < 0 || x + d * direction >= dim3) { out[id] = vol[id]; } else { float sum = 0; int cnt = 0; int yy_s = max(x0c[(2 * dim2 + y) * dim3 + x], x1c[(2 * dim2 + y) * dim3 + x + d * direction]); int yy_t = min(x0c[(3 * dim2 + y) * dim3 + x], x1c[(3 * dim2 + y) * dim3 + x + d * direction]); for (int yy = yy_s + 1; yy < yy_t; yy++) { int xx_s = max(x0c[(0 * dim2 + yy) * dim3 + x], x1c[(0 * dim2 + yy) * dim3 + x + d * direction] - d * direction); int xx_t = min(x0c[(1 * dim2 + yy) * dim3 + x], x1c[(1 * dim2 + yy) * dim3 + x + d * direction] - d * direction); for (int xx = xx_s + 1; xx < xx_t; xx++) { float val = vol[(d * dim2 + yy) * dim3 + xx]; assert(!isnan(val)); sum += val; cnt++; } } assert(cnt > 0); out[id] = sum / cnt; assert(!isnan(out[id])); } } } template <typename T> __global__ void subpixel_enchancement(float *d0, T *c2, float *out, int size, int dim23, int disp_max) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = d0[id]; out[id] = d; if (1 <= d && d < disp_max - 1) { float cn = c2[(d - 1) * dim23 + id]; float cz = c2[d * dim23 + id]; float cp = c2[(d + 1) * dim23 + id]; float denom = 2 * (cp + cn - 2 * cz); if (denom > 1e-5) { out[id] = d - min(1.0, max(-1.0, (cp - cn) / denom)); } } } } __global__ void median2d(float *img, float *out, int size, int dim2, int dim3, int kernel_radius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float xs[11 * 11]; int xs_size = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++) { if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) { xs[xs_size++] = img[yy * dim3 + xx]; } } } sort(xs, xs_size); out[id] = xs[xs_size / 2]; } } __global__ void mean2d(float *img, float *kernel, float *out, int size, int kernel_radius, int dim2, int dim3, float alpha2) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float sum = 0; float cnt = 0; int i = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++, i++) { if (0 <= xx && xx < dim3 && 0 <= yy 
&& yy < dim2 && abs(img[yy * dim3 + xx] - img[y * dim3 + x]) < alpha2) { sum += img[yy * dim3 + xx] * kernel[i]; cnt += kernel[i]; } } } out[id] = sum / cnt; } }
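// ---------------------------------------------------------------------------
// Editor's note (sketch, not part of the file above): subpixel_enchancement
// refines an integer disparity d by fitting a parabola through the matching
// costs at d-1, d and d+1 and taking the vertex. With cn = c(d-1), cz = c(d)
// and cp = c(d+1) the vertex lies at
//     d* = d - (cp - cn) / (2 * (cp + cn - 2 * cz)),
// which is exactly what the kernel computes; the offset is clamped to [-1, 1]
// and the refinement is skipped when the denominator is tiny. A host-side
// reference of the per-pixel formula (hypothetical helper name):
static float subpixel_refine_ref(float cn, float cz, float cp, int d)
{
    float denom = 2.0f * (cp + cn - 2.0f * cz);
    if (denom <= 1e-5f) return (float)d;       // cost curve too flat (or concave)
    float offset = (cp - cn) / denom;
    if (offset >  1.0f) offset =  1.0f;        // clamp as in the kernel
    if (offset < -1.0f) offset = -1.0f;
    return (float)d - offset;
}
// ---------------------------------------------------------------------------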
df6db9e3f84c4bbd3f2d6ceaa88ef7e90bc1b7aa.cu
typedef uint8_t uint8; #define TB 128 #define DISP_MAX 256 #define BLOCK_SIZE 32 #define XDIM_MAX_THREADS 1024 #define BLOCK_D_SIZE 64 #define COLOR_DIFF(x, i, j) (abs(x[i] - x[j])) struct postparams{ float pi1; float pi2; float tau_so; float alpha1; float sgm_q1; float sgm_q2; float alpha2; float sigma; int kernel_size; }; void parseConf(postparams &params,std::string conf ){ const std::string& chars = "\t\n\v\f\r "; std::ifstream ifs(conf.c_str()); std::string line; if(ifs.is_open()){ while(std::getline(ifs,line )){ std::string opt = line.substr(0,line.find_last_of(":")); opt.erase(0, opt.find_first_not_of(chars)); opt.erase(opt.find_last_not_of(chars) + 1); int start = line.find_last_of(":")+1; int end = line.find_first_of("#") - start; std::string val = line.substr(start,end); val.erase(0, val.find_first_not_of(chars)); val.erase(val.find_last_not_of(chars) + 1); if(!strcmp(opt.c_str(),"pi1")){ params.pi1 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"pi2")){ params.pi2 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"tau_so")){ params.tau_so = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"alpha1")){ params.alpha1 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"sgm_q1")){ params.sgm_q1 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"sgm_q2")){ params.sgm_q2 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"alpha2")){ params.alpha2 = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"sigma")){ params.sigma = atof(val.c_str()); }else if(!strcmp(opt.c_str(),"kernel_size")){ params.kernel_size = atoi(val.c_str()); } } }else{ std::cout << "File " << conf << " does not exist! " <<std::endl; exit(0); } } std::vector<std::string> getImages(std::string file){ std::vector<std::string> imageNames; std::ifstream ifs(file.c_str()); std::string line; if(ifs.is_open()){ while(std::getline(ifs,line )){ imageNames.push_back(line); } }else{ std::cout << "File " << file << " does not exist! 
" <<std::endl; exit(0); } return imageNames; } template<typename T> __global__ void argmin( float* disp_d, T* cost, int rows, int cols, int ndisp ){ int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x; if( Row < rows && Col < cols){ T mincost=cost[ Row*cols*ndisp+Col*ndisp ]; int d=0; for(int i=1; i<ndisp; i++){ float cd = cost[ Row*cols*ndisp+Col*ndisp +i ]; if( cd < mincost ){ mincost = cd; d = i; } } disp_d[ Row*cols+Col ] = (float)d; } } template<typename T> __global__ void argmin_d( float* disp_d, T* cost, int rows, int cols, int ndisp ){ int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x; if( Row < rows && Col < cols){ T mincost=cost[ Row*cols+Col ]; int d=0; for(int i=1; i<ndisp; i++){ float cd = cost[ i*rows*cols+Row*cols+Col ]; if( cd < mincost ){ mincost = cd; d = i; } } disp_d[ Row*cols+Col ] = (float)d; } } template<typename T> __global__ void swap_axis(const T* __restrict__ cost, T* temp_cost, const int rows, const int cols, const int ndisp ){ int Col = blockIdx.x*blockDim.x + threadIdx.x; int Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y; __shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1]; if( Col< cols*rows){ #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if(Row+d < ndisp) tile[threadIdx.y+d][threadIdx.x] = cost [(Row+d)*rows*cols+Col ]; } } __syncthreads(); Col = blockIdx.x*blockDim.x+threadIdx.y; Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x; #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if((Col+d) < cols*rows && Row<ndisp) temp_cost[ (Col+d)*ndisp+Row ] = tile[threadIdx.x][threadIdx.y+d]; } } template<typename T> __global__ void swap_axis_back(const T* __restrict__ cost, T* temp_cost, const int rows, const int cols, const int ndisp ){ int Col = blockIdx.x*blockDim.x + threadIdx.y; int Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x; __shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1]; if( Col< cols*rows){ #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ tile[threadIdx.y+d][threadIdx.x] = cost [(Col+d)*ndisp+Row ]; } } __syncthreads(); Col = blockIdx.x*blockDim.x + threadIdx.x; Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y; #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if((Col+d) < cols*rows) temp_cost[ (Row+d)*rows*cols+Col ] = tile[threadIdx.x][threadIdx.y+d]; } } template<typename T> __global__ void transpose(const T* __restrict__ cost, T* temp_cost, const int dim1, const int dim2){ int Col = blockIdx.x*blockDim.x + threadIdx.x; int Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y; int disp = blockIdx.z*dim1*dim2; __shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1]; if( Col< dim2){ #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if((Row+d)<dim1) tile[threadIdx.y+d][threadIdx.x] = cost [disp+(Row+d)*dim2+Col ]; } } __syncthreads(); Col = blockIdx.x*blockDim.x+threadIdx.y; Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x; #pragma unroll for(int d=0; d<BLOCK_D_SIZE; d+=16){ if((Col+d) < dim2 && Row < dim1) temp_cost[disp+(Col+d)*dim1+Row ] = tile[threadIdx.x][threadIdx.y+d]; } } template<typename T> __global__ void VerticalIntegralKernel(T* output, const int rows , const int cols , const int ndisp,const int offset){ extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[]; T* slice_sm = reinterpret_cast<T *>(shared_mem); int Row = threadIdx.x+offset; int Col = blockIdx.y; int disp = blockIdx.z; T val=0,temp=0,temp1=0; if( threadIdx.x < rows){ val = output[disp*rows*cols+Row*cols+Col]; for(int i=1; i<32; i<<=1 ){ temp = __shfl_up(val,i); if( (threadIdx.x & 31) >=i ) val +=temp; } if( 
(threadIdx.x & 31) ==31 || threadIdx.x==(rows-1) ) slice_sm[threadIdx.x/32] = val; } __syncthreads(); temp=0; if( threadIdx.x < 32 ){ temp = slice_sm[threadIdx.x]; for(int i=1; i<32; i<<=1){ temp1 = __shfl_up(temp,i); if( (threadIdx.x & 31) >=i ) temp += temp1; } slice_sm[threadIdx.x] = temp; } __syncthreads(); if( Row < rows){ if(threadIdx.x >=32) val += slice_sm[threadIdx.x/32-1]; output[disp*rows*cols+Row*cols+Col] = val; } } // This kernel has to be converted to the inplace integral kernel. Profiling though shows that this is not a bottleck for the method. template<typename T,typename I> __global__ void HorizontalIntegralKernel_outofplace(T* integral_vol,const I* input, const int integrrows , const int integrcols , const int ndisp,const int offset){ extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[]; T* slice_sm = reinterpret_cast<T *>(shared_mem); int Col = threadIdx.x+offset; int Row = blockIdx.x; int disp = blockIdx.z; if( Col < integrcols && disp < ndisp){ slice_sm[threadIdx.x] = input[disp*integrrows*integrcols+Row*integrcols+Col]; } if(offset>0 & threadIdx.x==0){ slice_sm[threadIdx.x] = integral_vol[disp*integrrows*integrcols+Row*integrcols+Col]; } T sum; for(int stride=1; stride< blockDim.x; stride*=2){ __syncthreads(); if((int)threadIdx.x-stride>=0 && Col < integrcols && disp < ndisp ) sum = slice_sm[threadIdx.x] + slice_sm[threadIdx.x-stride]; __syncthreads(); if((int)threadIdx.x-stride>=0 && Col < integrcols && disp < ndisp ) slice_sm[threadIdx.x] = sum; } if( Col<integrcols && disp < ndisp){ integral_vol[disp*integrrows*integrcols+Row*integrcols+Col] = slice_sm[threadIdx.x]; } } template<typename T> __global__ void IntegralKernel(T* output, const int dim1 , const int dim2 , const int ndisp,const int offset){ extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[]; T* slice_sm = reinterpret_cast<T *>(shared_mem); int Col = threadIdx.x+offset; int Row = blockIdx.y; int disp = blockIdx.z; T val=0,temp=0,temp1=0; if( Col < dim2){ val = output[disp*dim1*dim2+Row*dim2+Col]; for(int i=1; i<32; i<<=1 ){ temp = __shfl_up(val,i); if( (threadIdx.x & 31) >=i ) val +=temp; } if( (threadIdx.x & 31) ==31 || Col==(dim2-1) ) slice_sm[threadIdx.x/32] = val; } __syncthreads(); temp=0; if( threadIdx.x < 32 ){ temp = slice_sm[threadIdx.x]; for(int i=1; i<32; i<<=1){ temp1 = __shfl_up(temp,i); if( (threadIdx.x & 31) >=i ) temp += temp1; } slice_sm[threadIdx.x] = temp; } __syncthreads(); if( Col < dim2){ if(threadIdx.x >=32) val += slice_sm[threadIdx.x/32-1]; output[disp*dim1*dim2+Row*dim2+Col] = val; } } __device__ void sort(float *x, int n) { for (int i = 0; i < n - 1; i++) { int min = i; for (int j = i + 1; j < n; j++) { if (x[j] < x[min]) { min = j; } } float tmp = x[min]; x[min] = x[i]; x[i] = tmp; } } #define INDEX_D(dim0, dim1, dim2, dim3) \ assert((dim1) >= 0 && (dim1) < size1 && (dim2) >= 0 && (dim2) < size2 && (dim3) >= 0 && (dim3) < size3), \ ((((dim0) * size3 + (dim3)) * size1 + (dim1)) * size2 + dim2) #define INDEX(dim0, dim1, dim2, dim3) \ assert((dim1) >= 0 && (dim1) < size1 && (dim2) >= 0 && (dim2) < size2 && (dim3) >= 0 && (dim3) < size3), \ ((((dim0) * size1 + (dim1)) * size2 + (dim2)) * size3 + dim3) template <int sgm_direction,typename T> __global__ void sgm_loop(float *x0, float *x1, T *input, T *output, float *tmp, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step) { int x, y, dx, dy; int d = threadIdx.x; if (sgm_direction == 0) { /* right */ x = step; //step; y = 
blockIdx.x; dx = 1; dy = 0; } else if (sgm_direction == 1) { /* left */ x = size2 - 1 - step; //step; y = blockIdx.x; dx = -1; dy = 0; } else if (sgm_direction == 2) { /* down */ x = blockIdx.x; y = step;//step; dx = 0; dy = 1; } else if (sgm_direction == 3) { /* up */ x = blockIdx.x; y = size1 - 1 - step; //step; dx = 0; dy = -1; } if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) { float val = input[INDEX(0, y, x, d)]; output[INDEX(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; return; } extern __shared__ float sgm_shared[]; float * output_s = &sgm_shared[0]; float * output_min= &sgm_shared[size3]; output_s[d] = output_min[d] = tmp[d * size2 + blockIdx.x]; __syncthreads(); for (int i = 256; i > 0; i /= 2) { if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) { output_min[d] = output_min[d + i]; } __syncthreads(); } int ind2 = y * size2 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx); float D2; int xx = x + d * direction; if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) { D2 = 10; } else { D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx); } float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = pi2; } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = pi2 / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = pi2 / sgm_q1; } float cost = min(output_s[d], output_min[0] + P2); if (d - 1 >= 0) { cost = min(cost, output_s[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d + 1 < size3) { cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1)); } float val = (input[INDEX(0, y, x, d)] + cost - output_min[0]); output[INDEX(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; } template <int sgm_direction> __global__ void sgm2(uint8 *x0, uint8 *x1, float *input, float *output, float *tmp, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step) { int x, y, dx, dy; int d = threadIdx.x; if (sgm_direction == 0) { /* right */ x = blockIdx.y; //step; y = blockIdx.x; dx = 1; dy = 0; } else if (sgm_direction == 1) { /* left */ x = size2 - 1 - blockIdx.y; //step; y = blockIdx.x; dx = -1; dy = 0; } else if (sgm_direction == 2) { /* down */ x = blockIdx.x; y = blockIdx.y;//step; dx = 0; dy = 1; } else if (sgm_direction == 3) { /* up */ x = blockIdx.x; y = size1 - 1 - blockIdx.y; //step; dx = 0; dy = -1; } if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) { float val = input[INDEX(0, y, x, d)]; output[INDEX_D(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; return; } __shared__ double output_s[400], output_min[400]; output_s[d] = output_min[d] = tmp[d * size2 + blockIdx.x]; __syncthreads(); for (int i = 256; i > 0; i /= 2) { if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) { output_min[d] = output_min[d + i]; } __syncthreads(); } int ind2 = y * size2 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx); float D2; int xx = x + d * direction; if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) { D2 = 10; } else { D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx); } float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = pi2; } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = pi2 / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = pi2 / sgm_q1; } float cost = min(output_s[d], output_min[0] + P2); if (d - 1 >= 0) { cost = min(cost, output_s[d - 1] + 
(sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d + 1 < size3) { cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1)); } float val = (input[INDEX(0, y, x, d)] + cost - output_min[0])*.25; output[INDEX_D(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; } __global__ void cross(float *x0, float *out, int size, int dim2, int dim3, int L1, float tau1) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dir = id; int x = dir % dim3; dir /= dim3; int y = dir % dim2; dir /= dim2; int dx = 0; int dy = 0; if (dir == 0) { dx = -1; } else if (dir == 1) { dx = 1; } else if (dir == 2) { dy = -1; } else if (dir == 3) { dy = 1; } else { assert(0); } int xx, yy, ind1, ind2, dist; ind1 = y * dim3 + x; for (xx = x + dx, yy = y + dy;;xx += dx, yy += dy) { if (xx < 0 || xx >= dim3 || yy < 0 || yy >= dim2) break; dist = max(abs(xx - x), abs(yy - y)); if (dist == 1) continue; ind2 = yy * dim3 + xx; /* rule 1 */ if (COLOR_DIFF(x0, ind1, ind2) >= tau1) break; /* rule 2 */ if (dist >= L1) break; } out[id] = dir <= 1 ? xx : yy; } } template<typename T> __global__ void cbca(float *x0c, float *x1c, T *vol, T *out, int size, int dim2, int dim3, int direction) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % dim3; d /= dim3; int y = d % dim2; d /= dim2; if (x + d * direction < 0 || x + d * direction >= dim3) { out[id] = vol[id]; } else { float sum = 0; int cnt = 0; int yy_s = max(x0c[(2 * dim2 + y) * dim3 + x], x1c[(2 * dim2 + y) * dim3 + x + d * direction]); int yy_t = min(x0c[(3 * dim2 + y) * dim3 + x], x1c[(3 * dim2 + y) * dim3 + x + d * direction]); for (int yy = yy_s + 1; yy < yy_t; yy++) { int xx_s = max(x0c[(0 * dim2 + yy) * dim3 + x], x1c[(0 * dim2 + yy) * dim3 + x + d * direction] - d * direction); int xx_t = min(x0c[(1 * dim2 + yy) * dim3 + x], x1c[(1 * dim2 + yy) * dim3 + x + d * direction] - d * direction); for (int xx = xx_s + 1; xx < xx_t; xx++) { float val = vol[(d * dim2 + yy) * dim3 + xx]; assert(!isnan(val)); sum += val; cnt++; } } assert(cnt > 0); out[id] = sum / cnt; assert(!isnan(out[id])); } } } template <typename T> __global__ void subpixel_enchancement(float *d0, T *c2, float *out, int size, int dim23, int disp_max) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = d0[id]; out[id] = d; if (1 <= d && d < disp_max - 1) { float cn = c2[(d - 1) * dim23 + id]; float cz = c2[d * dim23 + id]; float cp = c2[(d + 1) * dim23 + id]; float denom = 2 * (cp + cn - 2 * cz); if (denom > 1e-5) { out[id] = d - min(1.0, max(-1.0, (cp - cn) / denom)); } } } } __global__ void median2d(float *img, float *out, int size, int dim2, int dim3, int kernel_radius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float xs[11 * 11]; int xs_size = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++) { if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) { xs[xs_size++] = img[yy * dim3 + xx]; } } } sort(xs, xs_size); out[id] = xs[xs_size / 2]; } } __global__ void mean2d(float *img, float *kernel, float *out, int size, int kernel_radius, int dim2, int dim3, float alpha2) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float sum = 0; float cnt = 0; int i = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++, i++) { if (0 <= xx && xx < dim3 && 0 <= yy 
&& yy < dim2 && abs(img[yy * dim3 + xx] - img[y * dim3 + x]) < alpha2) { sum += img[yy * dim3 + xx] * kernel[i]; cnt += kernel[i]; } } } out[id] = sum / cnt; } }
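// ---------------------------------------------------------------------------
// Editor's note (sketch, not part of the file above): IntegralKernel and
// VerticalIntegralKernel build their prefix sums from a warp-level inclusive
// scan based on __shfl_up. On CUDA 9+ the un-synchronized shuffle is
// deprecated; the same scan written with the *_sync variant looks like this
// (illustrative helper, assumes the full warp is active, not a drop-in edit of
// the kernels above):
__device__ float warp_inclusive_scan(float val)
{
    // Each lane adds the partial sum held by the lane `i` positions below it.
    for (int i = 1; i < 32; i <<= 1) {
        float up = __shfl_up_sync(0xffffffffu, val, i);
        if ((threadIdx.x & 31) >= i) val += up;
    }
    return val;
}
// ---------------------------------------------------------------------------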
6a1cd8470132361f39d71178e33e177d615c77fc.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include "Stopwatch.h" // CUDA runtime #include <hip/hip_runtime.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMulCUDABench(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB, int nIter) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = 
sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); hipMalloc((void **) &d_A, mem_size_A); hipMalloc((void **) &d_B, mem_size_B); hipMalloc((void **) &d_C, mem_size_C); // copy host memory to device hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); } else { hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); } hipDeviceSynchronize(); Stopwatch sw; sw.restart(); int ncount = 0; double maxtime = 2.0; for (ncount = 0; sw.getTime()<maxtime; ++ncount) { // Execute the kernel for (int j = 0; j < nIter; j++) { hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); } // Copy result from device to host hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); } sw.stop(); double megaflops = 1.0e-6*dimsA.x*dimsA.y*dimsB.x*ncount*nIter/sw.getTime(); printf("%f\n",megaflops); // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); return EXIT_SUCCESS; } /** * Program main */ int main(int argc, char **argv) { //printf("[Matrix Multiply Using CUDA] - Starting...\n"); // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; hipDeviceProp_t deviceProp; hipGetDevice(&devID); hipGetDeviceProperties(&deviceProp, devID); //printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; int msize; int nIter; //for (msize = 100; msize<=1000; msize+=100){ for (nIter = 2; nIter<=2048; nIter*=2){ //printf("msize = %i nIter = %i \n",msize,nIter); printf("nIter = %i \n",nIter); dim3 dimsA(msize, msize, 1); dim3 dimsB(msize, msize, 1); if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } //printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); //matrixMulCUDABench(argc, argv, block_size, dimsA, dimsB, nIter); } //} return 0; }
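A minimal host-side launch sketch for the tiled kernel above, added for illustration and not part of the original file. It assumes square n-by-n matrices with n a multiple of BLOCK_SIZE = 32 (the grid computation in matrixMulCUDABench makes the same assumption) and that d_A, d_B, d_C were already allocated with hipMalloc and filled.

static void launchTiledMatMul(float *d_C, float *d_A, float *d_B, int n)
{
    dim3 threads(32, 32);
    dim3 grid(n / threads.x, n / threads.y);
    // wA and wB are both n for square matrices.
    hipLaunchKernelGGL((matrixMulCUDA<32>), dim3(grid), dim3(threads), 0, 0,
                       d_C, d_A, d_B, n, n);
    hipDeviceSynchronize();
}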
6a1cd8470132361f39d71178e33e177d615c77fc.cu
/** * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include "Stopwatch.h" // CUDA runtime #include <cuda_runtime.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMulCUDABench(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB, int nIter) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float 
*)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); cudaMalloc((void **) &d_A, mem_size_A); cudaMalloc((void **) &d_B, mem_size_B); cudaMalloc((void **) &d_C, mem_size_C); // copy host memory to device cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } cudaDeviceSynchronize(); Stopwatch sw; sw.restart(); int ncount = 0; double maxtime = 2.0; for (ncount = 0; sw.getTime()<maxtime; ++ncount) { // Execute the kernel for (int j = 0; j < nIter; j++) { matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } // Copy result from device to host cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); } sw.stop(); double megaflops = 1.0e-6*dimsA.x*dimsA.y*dimsB.x*ncount*nIter/sw.getTime(); printf("%f\n",megaflops); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); return EXIT_SUCCESS; } /** * Program main */ int main(int argc, char **argv) { //printf("[Matrix Multiply Using CUDA] - Starting...\n"); // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; cudaDeviceProp deviceProp; cudaGetDevice(&devID); cudaGetDeviceProperties(&deviceProp, devID); //printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; int msize; int nIter; //for (msize = 100; msize<=1000; msize+=100){ for (nIter = 2; nIter<=2048; nIter*=2){ //printf("msize = %i nIter = %i \n",msize,nIter); printf("nIter = %i \n",nIter); dim3 dimsA(msize, msize, 1); dim3 dimsB(msize, msize, 1); if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } //printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); //matrixMulCUDABench(argc, argv, block_size, dimsA, dimsB, nIter); } //} return 0; }
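The benchmark above only reports a megaflops figure; in this snapshot main() leaves msize uninitialized and keeps the matrixMulCUDABench call commented out, so the result is never checked. Below is a hedged verification sketch, not part of the original file: because constantInit fills A with 1.0f and B with valB = 0.01f, every element of C should equal n * valB for an n-by-n multiply. The 1e-4 relative tolerance is an assumption that allows for float accumulation error.

static bool verifyConstantResult(const float *h_C, int n, float valB)
{
    const double expected = (double)n * valB;
    for (int i = 0; i < n * n; ++i) {
        double diff = (double)h_C[i] - expected;
        if (diff < 0.0) diff = -diff;            // manual abs, avoids needing <math.h>
        if (diff / expected > 1.0e-4) {          // relative error check
            printf("Error! C[%d]=%f, expected %f\n", i, h_C[i], expected);
            return false;
        }
    }
    return true;
}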
12baa6c10803bf192b61ca029a004090c16d9652.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "defines.hpp" #include "implicitTree.h" #include <cassert> #include <cstdio> #include <map> __device__ __host__ float evalNode(node_s *tree, float x, float y, float z) { if(tree == NULL) return 0.0f; if(tree->nodeType == OPERATOR) { operator_node_s* operator_node = (operator_node_s*) tree; return (operator_node->operatorFunc)( evalNode(operator_node->node.left_child,x,y,z), evalNode(operator_node->node.right_child,x,y,z) ); } else { density_node_s* density_node = (density_node_s*) tree; return density_node->densityFunc(x,y,z); } } __host__ node_s* makeOperatorNode(node_s* left_child, node_s* right_child, operatorFunction f) { operator_node_s* operator_node = (operator_node_s*) malloc(sizeof(operator_node_s)); operator_node->node.nodeType = OPERATOR; operator_node->node.left_child = left_child; operator_node->node.right_child = right_child; operator_node->operatorFunc = f; return (node_s*) operator_node; } __host__ node_s* makeDensityNode(densityFunction f) { density_node_s* density_node = (density_node_s*) malloc(sizeof(density_node_s)); density_node->node.nodeType = DENSITY; density_node->node.left_child = NULL; density_node->node.right_child = NULL; density_node->densityFunc = f; return (node_s*) density_node; } __host__ unsigned int countNodes(node_s *tree_h) { if(tree_h == NULL) return 0u; else return 1u + countNodes(tree_h->left_child) + countNodes(tree_h->right_child); } __host__ void setVal(unsigned char* host_ptr, unsigned char *val, size_t nBytes) { for (unsigned int i = 0; i < nBytes; i++) { hipMemset(host_ptr+i, val[i], 1); } } __global__ void initNodeKernel(operator_node_s *node_d, NodeType type, node_s *leftChild, node_s *rightChild, operatorFunction function) { node_d->node.nodeType = type; node_d->node.left_child = leftChild; node_d->node.right_child = rightChild; node_d->operatorFunc = function; } __host__ void initNode(operator_node_s *node_d, NodeType type, node_s *leftChild, node_s *rightChild, operatorFunction function) { hipLaunchKernelGGL(( initNodeKernel), dim3(1),dim3(1), 0, 0, node_d,type,leftChild,rightChild,function); } __host__ unsigned int buildDeviceTree(node_s *node_h, operator_node_s *node_d) { //check if node is NULL if(node_h == NULL) return 0; //create child nodes unsigned int offset = 1; operator_node_s *leftChild, *rightChild; leftChild = (node_h->left_child == NULL ? NULL : node_d+offset); offset += buildDeviceTree(node_h->left_child, node_d+offset); rightChild = (node_h->right_child == NULL ? 
NULL : node_d+offset); offset += buildDeviceTree(node_h->right_child, node_d+offset); operatorFunction func = NULL; if(node_h->nodeType == OPERATOR) { operator_node_s* op_node_h = (operator_node_s*) node_h; func = operatorFunctionPointers[op_node_h->operatorFunc]; } else { density_node_s* ds_node_h = (density_node_s*) node_h; func = (operatorFunction)densityFunctionPointers[ds_node_h->densityFunc]; } PRINTD("Creating node %p (type=%i,l=%p,r=%p,func=%p)\n", node_d, node_h->nodeType, leftChild, rightChild,func); initNode(node_d, node_h->nodeType, (node_s*)leftChild, (node_s*)rightChild, func); operator_node_s testNode; hipMemcpy(&testNode, node_d, sizeof(operator_node_s), hipMemcpyDeviceToHost); PRINTD("Got node \t\t(type=%i,l=%p,r=%p,func=%p)\n", testNode.node.nodeType, testNode.node.left_child, testNode.node.right_child,testNode.operatorFunc); return offset; } __host__ node_s* makeDeviceTreeFromHost(node_s *tree_h) { assert(sizeof(operatorFunction) == sizeof(densityFunction)); const unsigned int nNode = countNodes(tree_h); PRINTD("There are %i nodes !\n", nNode); operator_node_s *data_d; hipMalloc(&data_d, nNode*sizeof(operator_node_s)); assert(buildDeviceTree(tree_h, data_d) == nNode); return (node_s*)data_d; } __global__ void testKernel(float *x, float *y, float *z, float *res, node_s *tree) { unsigned int ix = blockIdx.x; unsigned int iy = blockIdx.y; unsigned int iz = blockIdx.z; unsigned int i = iz*(gridDim.x*gridDim.y) + iy*gridDim.x + ix; res[i] = evalNode(tree,x[ix],y[iy],z[iz]); } __host__ void computeTestKernel(float *x, float *y, float *z, float *res, node_s *tree) { dim3 gridDim(10,10,10); dim3 blockDim(1,1,1); hipLaunchKernelGGL(( testKernel), dim3(gridDim),dim3(blockDim), 0, 0, x,y,z,res,tree); }
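A hedged usage sketch, not part of the original file: given a host tree already assembled with makeOperatorNode/makeDensityNode (the operator and density identifiers, and the operatorFunctionPointers/densityFunctionPointers tables used by buildDeviceTree, live in implicitTree.h, which is not shown here), this mirrors the tree into device memory and evaluates it on the 10x10x10 grid hard-coded in computeTestKernel. x_d, y_d and z_d must each hold 10 floats and res_d 1000 floats, all device-resident.

__host__ void evaluateHostTreeOnDevice(node_s *root_h,
                                       float *x_d, float *y_d, float *z_d,
                                       float *res_d)
{
    // Copy the host tree into one contiguous device array of nodes.
    node_s *tree_d = makeDeviceTreeFromHost(root_h);
    // One block per grid point; testKernel writes res[z*100 + y*10 + x].
    computeTestKernel(x_d, y_d, z_d, res_d, tree_d);
    hipDeviceSynchronize();
    hipFree(tree_d);  // buffer was allocated with hipMalloc inside makeDeviceTreeFromHost
}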
12baa6c10803bf192b61ca029a004090c16d9652.cu
#include "defines.hpp" #include "implicitTree.h" #include <cassert> #include <cstdio> #include <map> __device__ __host__ float evalNode(node_s *tree, float x, float y, float z) { if(tree == NULL) return 0.0f; if(tree->nodeType == OPERATOR) { operator_node_s* operator_node = (operator_node_s*) tree; return (operator_node->operatorFunc)( evalNode(operator_node->node.left_child,x,y,z), evalNode(operator_node->node.right_child,x,y,z) ); } else { density_node_s* density_node = (density_node_s*) tree; return density_node->densityFunc(x,y,z); } } __host__ node_s* makeOperatorNode(node_s* left_child, node_s* right_child, operatorFunction f) { operator_node_s* operator_node = (operator_node_s*) malloc(sizeof(operator_node_s)); operator_node->node.nodeType = OPERATOR; operator_node->node.left_child = left_child; operator_node->node.right_child = right_child; operator_node->operatorFunc = f; return (node_s*) operator_node; } __host__ node_s* makeDensityNode(densityFunction f) { density_node_s* density_node = (density_node_s*) malloc(sizeof(density_node_s)); density_node->node.nodeType = DENSITY; density_node->node.left_child = NULL; density_node->node.right_child = NULL; density_node->densityFunc = f; return (node_s*) density_node; } __host__ unsigned int countNodes(node_s *tree_h) { if(tree_h == NULL) return 0u; else return 1u + countNodes(tree_h->left_child) + countNodes(tree_h->right_child); } __host__ void setVal(unsigned char* host_ptr, unsigned char *val, size_t nBytes) { for (unsigned int i = 0; i < nBytes; i++) { cudaMemset(host_ptr+i, val[i], 1); } } __global__ void initNodeKernel(operator_node_s *node_d, NodeType type, node_s *leftChild, node_s *rightChild, operatorFunction function) { node_d->node.nodeType = type; node_d->node.left_child = leftChild; node_d->node.right_child = rightChild; node_d->operatorFunc = function; } __host__ void initNode(operator_node_s *node_d, NodeType type, node_s *leftChild, node_s *rightChild, operatorFunction function) { initNodeKernel<<<1,1>>>(node_d,type,leftChild,rightChild,function); } __host__ unsigned int buildDeviceTree(node_s *node_h, operator_node_s *node_d) { //check if node is NULL if(node_h == NULL) return 0; //create child nodes unsigned int offset = 1; operator_node_s *leftChild, *rightChild; leftChild = (node_h->left_child == NULL ? NULL : node_d+offset); offset += buildDeviceTree(node_h->left_child, node_d+offset); rightChild = (node_h->right_child == NULL ? 
NULL : node_d+offset); offset += buildDeviceTree(node_h->right_child, node_d+offset); operatorFunction func = NULL; if(node_h->nodeType == OPERATOR) { operator_node_s* op_node_h = (operator_node_s*) node_h; func = operatorFunctionPointers[op_node_h->operatorFunc]; } else { density_node_s* ds_node_h = (density_node_s*) node_h; func = (operatorFunction)densityFunctionPointers[ds_node_h->densityFunc]; } PRINTD("Creating node %p (type=%i,l=%p,r=%p,func=%p)\n", node_d, node_h->nodeType, leftChild, rightChild,func); initNode(node_d, node_h->nodeType, (node_s*)leftChild, (node_s*)rightChild, func); operator_node_s testNode; cudaMemcpy(&testNode, node_d, sizeof(operator_node_s), cudaMemcpyDeviceToHost); PRINTD("Got node \t\t(type=%i,l=%p,r=%p,func=%p)\n", testNode.node.nodeType, testNode.node.left_child, testNode.node.right_child,testNode.operatorFunc); return offset; } __host__ node_s* makeDeviceTreeFromHost(node_s *tree_h) { assert(sizeof(operatorFunction) == sizeof(densityFunction)); const unsigned int nNode = countNodes(tree_h); PRINTD("There are %i nodes !\n", nNode); operator_node_s *data_d; cudaMalloc(&data_d, nNode*sizeof(operator_node_s)); assert(buildDeviceTree(tree_h, data_d) == nNode); return (node_s*)data_d; } __global__ void testKernel(float *x, float *y, float *z, float *res, node_s *tree) { unsigned int ix = blockIdx.x; unsigned int iy = blockIdx.y; unsigned int iz = blockIdx.z; unsigned int i = iz*(gridDim.x*gridDim.y) + iy*gridDim.x + ix; res[i] = evalNode(tree,x[ix],y[iy],z[iz]); } __host__ void computeTestKernel(float *x, float *y, float *z, float *res, node_s *tree) { dim3 gridDim(10,10,10); dim3 blockDim(1,1,1); testKernel<<<gridDim,blockDim>>>(x,y,z,res,tree); }
3abeaf9ddebe01ca92be0605d29f8bd27723185a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1./sqrt(variance[filter] + .000001f)); } }
3abeaf9ddebe01ca92be0605d29f8bd27723185a.cu
#include "includes.h" extern "C" { } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1./sqrt(variance[filter] + .000001f)); } }
a6416fe88d80fbda882045fb7a3601da1827848b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file reader_impl.cu * @brief cuDF-IO Parquet reader class implementation */ #include "reader_impl.hpp" #include <io/comp/gpuinflate.h> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_buffer.hpp> #include <algorithm> #include <array> #include <regex> namespace cudf { namespace experimental { namespace io { namespace detail { namespace parquet { // Import functionality that's independent of legacy code using namespace cudf::io::parquet; using namespace cudf::io; namespace { /** * @brief Function that translates Parquet datatype to cuDF type enum */ constexpr type_id to_type_id(parquet::Type physical, parquet::ConvertedType logical, bool strings_to_categorical, type_id timestamp_type_id, int32_t decimal_scale) { // Logical type used for actual data interpretation; the legacy converted type // is superceded by 'logical' type whenever available. switch (logical) { case parquet::UINT_8: case parquet::INT_8: return type_id::INT8; case parquet::UINT_16: case parquet::INT_16: return type_id::INT16; case parquet::DATE: return type_id::TIMESTAMP_DAYS; case parquet::TIMESTAMP_MICROS: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_MICROSECONDS; case parquet::TIMESTAMP_MILLIS: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_MILLISECONDS; case parquet::DECIMAL: if (decimal_scale != 0 || (physical != parquet::INT32 && physical != parquet::INT64)) { return type_id::FLOAT64; } break; default: break; } // Physical storage type supported by Parquet; controls the on-disk storage // format in combination with the encoding type. switch (physical) { case parquet::BOOLEAN: return type_id::BOOL8; case parquet::INT32: return type_id::INT32; case parquet::INT64: return type_id::INT64; case parquet::FLOAT: return type_id::FLOAT32; case parquet::DOUBLE: return type_id::FLOAT64; case parquet::BYTE_ARRAY: case parquet::FIXED_LEN_BYTE_ARRAY: // Can be mapped to GDF_CATEGORY (32-bit hash) or GDF_STRING (nvstring) return strings_to_categorical ? type_id::INT32 : type_id::STRING; case parquet::INT96: return (timestamp_type_id != type_id::EMPTY) ? 
timestamp_type_id : type_id::TIMESTAMP_NANOSECONDS; default: break; } return type_id::EMPTY; } /** * @brief Function that translates cuDF time unit to Parquet clock frequency */ constexpr int32_t to_clockrate(type_id timestamp_type_id) { switch (timestamp_type_id) { case type_id::TIMESTAMP_SECONDS: return 1; case type_id::TIMESTAMP_MILLISECONDS: return 1000; case type_id::TIMESTAMP_MICROSECONDS: return 1000000; case type_id::TIMESTAMP_NANOSECONDS: return 1000000000; default: return 0; } } /** * @brief Function that returns the required the number of bits to store a value */ template <typename T = uint8_t> T required_bits(uint32_t max_level) { return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level)); } std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id, type_id timestamp_type_id, parquet::Type physical, int8_t converted, int32_t length) { int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0; int32_t clock_rate = 0; if (column_type_id == type_id::INT8) { type_width = 1; // I32 -> I8 } else if (column_type_id == type_id::INT16) { type_width = 2; // I32 -> I16 } else if (column_type_id == type_id::INT32) { type_width = 4; // str -> hash32 } else if (is_timestamp(data_type{column_type_id})) { clock_rate = to_clockrate(timestamp_type_id); } int8_t converted_type = converted; if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64) { converted_type = parquet::UNKNOWN; // Not converting to float64 } return std::make_tuple(type_width, clock_rate, converted_type); } } // namespace /** * @brief Class for parsing dataset metadata */ struct metadata : public FileMetaData { explicit metadata(datasource *source) { constexpr auto header_len = sizeof(file_header_s); constexpr auto ender_len = sizeof(file_ender_s); const auto len = source->size(); const auto header_buffer = source->get_buffer(0, header_len); const auto header = (const file_header_s *)header_buffer->data(); const auto ender_buffer = source->get_buffer(len - ender_len, ender_len); const auto ender = (const file_ender_s *)ender_buffer->data(); CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source"); CUDF_EXPECTS( header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC, "Corrupted header or footer"); CUDF_EXPECTS(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len), "Incorrect footer length"); const auto buffer = source->get_buffer(len - ender->footer_len - ender_len, ender->footer_len); CompactProtocolReader cp(buffer->data(), ender->footer_len); CUDF_EXPECTS(cp.read(this), "Cannot parse metadata"); CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema"); } inline int get_total_rows() const { return num_rows; } inline int get_num_row_groups() const { return row_groups.size(); } inline int get_num_columns() const { return row_groups[0].columns.size(); } std::string get_column_name(const std::vector<std::string> &path_in_schema) { std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : ""; for (size_t i = 1; i < path_in_schema.size(); i++) { s += "." 
+ path_in_schema[i]; } return s; } std::vector<std::string> get_column_names() { std::vector<std::string> all_names; if (row_groups.size() != 0) { for (const auto &chunk : row_groups[0].columns) { all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema)); } } return all_names; } /** * @brief Extracts the pandas "index_columns" section * * PANDAS adds its own metadata to the key_value section when writing out the * dataframe to a file to aid in exact reconstruction. The JSON-formatted * metadata contains the index column(s) and PANDA-specific datatypes. * * @return comma-separated index column names in quotes */ std::string get_pandas_index() { auto it = std::find_if(key_value_metadata.begin(), key_value_metadata.end(), [](const auto &item) { return item.key == "pandas"; }); if (it != key_value_metadata.end()) { // Captures a list of quoted strings found inside square brackets after `"index_columns":` // Inside quotes supports newlines, brackets, escaped quotes, etc. // One-liner regex: // "index_columns"\s*:\s*\[\s*((?:"(?:|(?:.*?(?![^\\]")).?)[^\\]?",?\s*)*)\] // Documented below. std::regex index_columns_expr{ R"("index_columns"\s*:\s*\[\s*)" // match preamble, opening square bracket, whitespace R"(()" // Open first capturing group R"((?:")" // Open non-capturing group match opening quote R"((?:|(?:.*?(?![^\\]")).?))" // match empty string or anything between quotes R"([^\\]?")" // Match closing non-escaped quote R"(,?\s*)" // Match optional comma and whitespace R"()*)" // Close non-capturing group and repeat 0 or more times R"())" // Close first capturing group R"(\])" // Match closing square brackets }; std::smatch sm; if (std::regex_search(it->value, sm, index_columns_expr)) { return std::move(sm[1].str()); } } return ""; } /** * @brief Extracts the column name(s) used for the row indexes in a dataframe * * @param names List of column names to load, where index column name(s) will be added */ void add_pandas_index_names(std::vector<std::string>& names) { auto str = get_pandas_index(); if (str.length() != 0) { std::regex index_name_expr{R"(\"((?:\\.|[^\"])*)\")"}; std::smatch sm; while (std::regex_search(str, sm, index_name_expr)) { if (sm.size() == 2) { // 2 = whole match, first item if (std::find(names.begin(), names.end(), sm[1].str()) == names.end()) { std::regex esc_quote{R"(\\")"}; names.emplace_back(std::move(std::regex_replace(sm[1].str(), esc_quote, R"(")"))); } } str = sm.suffix(); } } } /** * @brief Filters and reduces down to a selection of row groups * * @param row_group Index of the row group to select * @param max_rowgroup_count Max number of consecutive row groups if > 0 * @param row_start Starting row of the selection * @param row_count Total number of rows selected * * @return List of row group indexes and its starting row */ auto select_row_groups(int row_group, int max_rowgroup_count, int &row_start, int &row_count) { std::vector<std::pair<int, int>> selection; if (row_group != -1) { CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group"); for (int i = 0; i < row_group; ++i) { row_start += row_groups[i].num_rows; } row_count = 0; do { selection.emplace_back(row_group, row_start + row_count); row_count += row_groups[row_group].num_rows; } while (--max_rowgroup_count > 0 && ++row_group < get_num_row_groups()); } else { row_start = ::max(row_start, 0); if (row_count == -1) { row_count = get_total_rows(); } CUDF_EXPECTS(row_count >= 0, "Invalid row count"); CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start"); for (int i = 0, 
count = 0; i < (int)row_groups.size(); ++i) { count += row_groups[i].num_rows; if (count > row_start || count == 0) { selection.emplace_back(i, count - row_groups[i].num_rows); } if (count >= (row_start + row_count)) { break; } } } return selection; } /** * @brief Filters and reduces down to a selection of columns * * @param use_names List of column names to select * @param include_index Whether to always include the PANDAS index column(s) * * @return List of column names */ auto select_columns(std::vector<std::string> use_names, bool include_index) { std::vector<std::pair<int, std::string>> selection; const auto names = get_column_names(); if (use_names.empty()) { // No columns specified; include all in the dataset for (const auto &name : names) { selection.emplace_back(selection.size(), name); } } else { // Load subset of columns; include PANDAS index unless excluded if (include_index) { add_pandas_index_names(use_names); } for (const auto &use_name : use_names) { for (size_t i = 0; i < names.size(); ++i) { if (names[i] == use_name) { selection.emplace_back(i, names[i]); break; } } } } return selection; } }; size_t reader::impl::count_page_headers( hostdevice_vector<gpu::ColumnChunkDesc> &chunks, hipStream_t stream) { size_t total_pages = 0; CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream)); CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream)); CUDA_TRY(hipMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); for (size_t c = 0; c < chunks.size(); c++) { total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages; } return total_pages; } void reader::impl::decode_page_headers( hostdevice_vector<gpu::ColumnChunkDesc> &chunks, hostdevice_vector<gpu::PageInfo> &pages, hipStream_t stream) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages; chunks[c].page_info = pages.device_ptr(page_count); page_count += chunks[c].max_num_pages; } CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream)); CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream)); CUDA_TRY(hipMemcpyAsync(pages.host_ptr(), pages.device_ptr(), pages.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); } rmm::device_buffer reader::impl::decompress_page_data( hostdevice_vector<gpu::ColumnChunkDesc> &chunks, hostdevice_vector<gpu::PageInfo> &pages, hipStream_t stream) { auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)> &f) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { const auto page_stride = chunks[c].max_num_pages; if (chunks[c].codec == codec) { for (int k = 0; k < page_stride; k++) { f(page_count + k); } } page_count += page_stride; } }; // Brotli scratch memory for decompressing rmm::device_vector<uint8_t> debrotli_scratch; // Count the exact number of compressed pages size_t num_comp_pages = 0; size_t total_decomp_size = 0; std::array<std::pair<parquet::Compression, size_t>, 3> codecs{ std::make_pair(parquet::GZIP, 0), std::make_pair(parquet::SNAPPY, 0), std::make_pair(parquet::BROTLI, 0)}; for (auto &codec : codecs) { for_each_codec_page(codec.first, [&](size_t page) { total_decomp_size += pages[page].uncompressed_page_size; codec.second++; num_comp_pages++; }); if (codec.first 
== parquet::BROTLI && codec.second > 0) { debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second)); } } // Dispatch batches of pages to decompress for each codec rmm::device_buffer decomp_pages(total_decomp_size, stream); hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_comp_pages, stream); hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_comp_pages, stream); size_t decomp_offset = 0; int32_t argc = 0; for (const auto &codec : codecs) { if (codec.second > 0) { int32_t start_pos = argc; for_each_codec_page(codec.first, [&](size_t page) { auto dst_base = static_cast<uint8_t *>(decomp_pages.data()); inflate_in[argc].srcDevice = pages[page].page_data; inflate_in[argc].srcSize = pages[page].compressed_page_size; inflate_in[argc].dstDevice = dst_base + decomp_offset; inflate_in[argc].dstSize = pages[page].uncompressed_page_size; inflate_out[argc].bytes_written = 0; inflate_out[argc].status = static_cast<uint32_t>(-1000); inflate_out[argc].reserved = 0; pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice; decomp_offset += inflate_in[argc].dstSize; argc++; }); CUDA_TRY(hipMemcpyAsync( inflate_in.device_ptr(start_pos), inflate_in.host_ptr(start_pos), sizeof(decltype(inflate_in)::value_type) * (argc - start_pos), hipMemcpyHostToDevice, stream)); CUDA_TRY(hipMemcpyAsync( inflate_out.device_ptr(start_pos), inflate_out.host_ptr(start_pos), sizeof(decltype(inflate_out)::value_type) * (argc - start_pos), hipMemcpyHostToDevice, stream)); switch (codec.first) { case parquet::GZIP: CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), argc - start_pos, 1, stream)) break; case parquet::SNAPPY: CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), argc - start_pos, stream)); break; case parquet::BROTLI: CUDA_TRY(gpu_debrotli( inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), debrotli_scratch.data().get(), debrotli_scratch.size(), argc - start_pos, stream)); break; default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break; } CUDA_TRY(hipMemcpyAsync( inflate_out.host_ptr(start_pos), inflate_out.device_ptr(start_pos), sizeof(decltype(inflate_out)::value_type) * (argc - start_pos), hipMemcpyDeviceToHost, stream)); } } CUDA_TRY(hipStreamSynchronize(stream)); // Update the page information in device memory with the updated value of // page_data; it now points to the uncompressed data buffer CUDA_TRY(hipMemcpyAsync(pages.device_ptr(), pages.host_ptr(), pages.memory_size(), hipMemcpyHostToDevice, stream)); return decomp_pages; } void reader::impl::decode_page_data( hostdevice_vector<gpu::ColumnChunkDesc> &chunks, hostdevice_vector<gpu::PageInfo> &pages, size_t min_row, size_t total_rows, const std::vector<int> &chunk_map, std::vector<column_buffer> &out_buffers, hipStream_t stream) { auto is_dict_chunk = [](const gpu::ColumnChunkDesc &chunk) { return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0; }; // Count the number of string dictionary entries // NOTE: Assumes first page in the chunk is always the dictionary page size_t total_str_dict_indexes = 0; for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_values; } page_count += chunks[c].max_num_pages; } // Build index for string dictionaries since they can't be indexed // directly due to variable-sized elements rmm::device_vector<gpu::nvstrdesc_s> str_dict_index; if (total_str_dict_indexes > 0) { 
str_dict_index.resize(total_str_dict_indexes); } // Update chunks with pointers to column data for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs; str_ofs += pages[page_count].num_values; } chunks[c].column_data_base = out_buffers[chunk_map[c]].data(); chunks[c].valid_map_base = out_buffers[chunk_map[c]].null_mask(); page_count += chunks[c].max_num_pages; } CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream)); if (total_str_dict_indexes > 0) { CUDA_TRY(gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), stream)); } CUDA_TRY(gpu::DecodePageData(pages.device_ptr(), pages.size(), chunks.device_ptr(), chunks.size(), total_rows, min_row, stream)); CUDA_TRY(hipMemcpyAsync(pages.host_ptr(), pages.device_ptr(), pages.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); for (size_t i = 0; i < pages.size(); i++) { if (pages[i].num_rows > 0) { const size_t c = pages[i].chunk_idx; if (c < chunks.size()) { out_buffers[chunk_map[c]].null_count() += pages[i].num_rows - pages[i].valid_count; } } } } reader::impl::impl(std::unique_ptr<datasource> source, reader_options const &options, rmm::mr::device_memory_resource *mr) : _source(std::move(source)), _mr(mr) { // Open and parse the source dataset metadata _metadata = std::make_unique<metadata>(_source.get()); // Select only columns required by the options _selected_columns = _metadata->select_columns( options.columns, options.use_pandas_metadata); // Override output timestamp resolution if requested if (options.timestamp_type.id() != EMPTY) { _timestamp_type = options.timestamp_type; } // Strings may be returned as either string or categorical columns _strings_to_categorical = options.strings_to_categorical; } table_with_metadata reader::impl::read(int skip_rows, int num_rows, int row_group, int max_rowgroup_count, hipStream_t stream) { std::vector<std::unique_ptr<column>> out_columns; table_metadata out_metadata; // Select only row groups required const auto selected_row_groups = _metadata->select_row_groups(row_group, max_rowgroup_count, skip_rows, num_rows); // Get a list of column data types std::vector<data_type> column_types; if (_metadata->row_groups.size() != 0) { for (const auto &col : _selected_columns) { auto &col_schema = _metadata->schema[_metadata->row_groups[0].columns[col.first].schema_idx]; auto col_type = to_type_id(col_schema.type, col_schema.converted_type, _strings_to_categorical, _timestamp_type.id(), col_schema.decimal_scale); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); column_types.emplace_back(col_type); } } out_columns.reserve(column_types.size()); if (selected_row_groups.size() != 0 && column_types.size() != 0) { // Descriptors for all the chunks that make up the selected columns const auto num_columns = _selected_columns.size(); const auto num_chunks = selected_row_groups.size() * num_columns; hostdevice_vector<gpu::ColumnChunkDesc> chunks(0, num_chunks, stream); // Association between each column chunk and its column std::vector<int> chunk_map(num_chunks); // Tracker for eventually deallocating compressed and uncompressed data std::vector<rmm::device_buffer> page_data(num_chunks); // Initialize column chunk information size_t total_decompressed_size = 0; auto remaining_rows = num_rows; for (const auto &rg : selected_row_groups) { auto row_group = _metadata->row_groups[rg.first]; 
auto row_group_start = rg.second; auto row_group_rows = std::min<int>(remaining_rows, row_group.num_rows); for (size_t i = 0; i < num_columns; ++i) { auto col = _selected_columns[i]; auto &col_meta = row_group.columns[col.first].meta_data; auto &col_schema = _metadata->schema[row_group.columns[col.first].schema_idx]; // Spec requires each row group to contain exactly one chunk for every // column. If there are too many or too few, continue with best effort if (col.second != _metadata->get_column_name(col_meta.path_in_schema)) { std::cerr << "Detected mismatched column chunk" << std::endl; continue; } if (chunks.size() >= chunks.max_size()) { std::cerr << "Detected too many column chunks" << std::endl; continue; } int32_t type_width; int32_t clock_rate; int8_t converted_type; std::tie(type_width, clock_rate, converted_type) = conversion_info( column_types[i].id(), _timestamp_type.id(), col_schema.type, col_schema.converted_type, col_schema.type_length); uint8_t *d_compdata = nullptr; if (col_meta.total_compressed_size != 0) { const auto offset = (col_meta.dictionary_page_offset != 0) ? ::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) : col_meta.data_page_offset; auto buffer = _source->get_buffer(offset, col_meta.total_compressed_size); page_data[chunks.size()] = rmm::device_buffer(buffer->data(), buffer->size(), stream); d_compdata = static_cast<uint8_t *>(page_data[chunks.size()].data()); } chunks.insert(gpu::ColumnChunkDesc( col_meta.total_compressed_size, d_compdata, col_meta.num_values, col_schema.type, type_width, row_group_start, row_group_rows, col_schema.max_definition_level, col_schema.max_repetition_level, required_bits(col_schema.max_definition_level), required_bits(col_schema.max_repetition_level), col_meta.codec, converted_type, col_schema.decimal_scale, clock_rate)); // Map each column chunk to its column index chunk_map[chunks.size() - 1] = i; if (col_meta.codec != Compression::UNCOMPRESSED) { total_decompressed_size += col_meta.total_uncompressed_size; } } remaining_rows -= row_group.num_rows; } assert(remaining_rows <= 0); // Process dataset chunk pages into output columns const auto total_pages = count_page_headers(chunks, stream); if (total_pages > 0) { hostdevice_vector<gpu::PageInfo> pages(total_pages, total_pages, stream); rmm::device_buffer decomp_page_data; decode_page_headers(chunks, pages, stream); if (total_decompressed_size > 0) { decomp_page_data = decompress_page_data(chunks, pages, stream); // Free compressed data for (size_t c = 0; c < chunks.size(); c++) { if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { page_data[c].resize(0); page_data[c].shrink_to_fit(); } } } std::vector<column_buffer> out_buffers; out_buffers.reserve(column_types.size()); for (size_t i = 0; i < column_types.size(); ++i) { auto col = _selected_columns[i]; auto &col_schema = _metadata->schema[_metadata->row_groups[selected_row_groups[0].first] .columns[col.first] .schema_idx]; bool is_nullable = (col_schema.max_definition_level != 0); out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr); } decode_page_data(chunks, pages, skip_rows, num_rows, chunk_map, out_buffers, stream); for (size_t i = 0; i < column_types.size(); ++i) { out_columns.emplace_back(make_column(column_types[i], num_rows, out_buffers[i], stream, _mr)); } } } // Create empty columns as needed for (size_t i = out_columns.size(); i < column_types.size(); ++i) { out_columns.emplace_back(make_empty_column(column_types[i])); } // Return column names (must match order of 
returned columns) out_metadata.column_names.resize(_selected_columns.size()); for (size_t i = 0; i < _selected_columns.size(); i++) { out_metadata.column_names[i] = _selected_columns[i].second; } // Return user metadata for (const auto& kv : _metadata->key_value_metadata) { out_metadata.user_data.insert({kv.key, kv.value}); } return { std::make_unique<table>(std::move(out_columns)), std::move(out_metadata) }; } // Forward to implementation reader::reader(std::string filepath, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(filepath), options, mr)) { } // Forward to implementation reader::reader(const char *buffer, size_t length, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(buffer, length), options, mr)) {} // Forward to implementation reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(file), options, mr)) {} // Destructor within this translation unit reader::~reader() = default; // Forward to implementation table_with_metadata reader::read_all(hipStream_t stream) { return _impl->read(0, -1, -1, -1, stream); } // Forward to implementation table_with_metadata reader::read_row_group(size_type row_group, size_type row_group_count, hipStream_t stream) { return _impl->read(0, -1, row_group, row_group_count, stream); } // Forward to implementation table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, hipStream_t stream) { return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, -1, -1, stream); } } // namespace parquet } // namespace detail } // namespace io } // namespace experimental } // namespace cudf
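A hedged usage sketch for the reader defined above, not part of the original file. It uses only the constructors and read_* entry points visible in this translation unit; the reader_options members mirror the fields this file actually reads (columns, use_pandas_metadata, strings_to_categorical, timestamp_type), but that struct and the RMM resource accessor are declared elsewhere, so treat the exact spellings below as assumptions.

// Assumes the cudf::experimental::io::detail::parquet namespace (or equivalent
// qualification) and a default-constructible reader_options.
void read_whole_parquet_file(std::string const &path)
{
    reader_options options{};                    // all columns, no type overrides
    auto *mr = rmm::mr::get_default_resource();  // assumption: default RMM device resource
    reader rdr(path, options, mr);
    table_with_metadata result = rdr.read_all(0);  // default stream
    // result.tbl owns the decoded columns; result.metadata carries the column
    // names and key/value metadata copied out at the end of reader::impl::read().
}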
a6416fe88d80fbda882045fb7a3601da1827848b.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file reader_impl.cu * @brief cuDF-IO Parquet reader class implementation */ #include "reader_impl.hpp" #include <io/comp/gpuinflate.h> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_buffer.hpp> #include <algorithm> #include <array> #include <regex> namespace cudf { namespace experimental { namespace io { namespace detail { namespace parquet { // Import functionality that's independent of legacy code using namespace cudf::io::parquet; using namespace cudf::io; namespace { /** * @brief Function that translates Parquet datatype to cuDF type enum */ constexpr type_id to_type_id(parquet::Type physical, parquet::ConvertedType logical, bool strings_to_categorical, type_id timestamp_type_id, int32_t decimal_scale) { // Logical type used for actual data interpretation; the legacy converted type // is superceded by 'logical' type whenever available. switch (logical) { case parquet::UINT_8: case parquet::INT_8: return type_id::INT8; case parquet::UINT_16: case parquet::INT_16: return type_id::INT16; case parquet::DATE: return type_id::TIMESTAMP_DAYS; case parquet::TIMESTAMP_MICROS: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_MICROSECONDS; case parquet::TIMESTAMP_MILLIS: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_MILLISECONDS; case parquet::DECIMAL: if (decimal_scale != 0 || (physical != parquet::INT32 && physical != parquet::INT64)) { return type_id::FLOAT64; } break; default: break; } // Physical storage type supported by Parquet; controls the on-disk storage // format in combination with the encoding type. switch (physical) { case parquet::BOOLEAN: return type_id::BOOL8; case parquet::INT32: return type_id::INT32; case parquet::INT64: return type_id::INT64; case parquet::FLOAT: return type_id::FLOAT32; case parquet::DOUBLE: return type_id::FLOAT64; case parquet::BYTE_ARRAY: case parquet::FIXED_LEN_BYTE_ARRAY: // Can be mapped to GDF_CATEGORY (32-bit hash) or GDF_STRING (nvstring) return strings_to_categorical ? type_id::INT32 : type_id::STRING; case parquet::INT96: return (timestamp_type_id != type_id::EMPTY) ? 
timestamp_type_id : type_id::TIMESTAMP_NANOSECONDS; default: break; } return type_id::EMPTY; } /** * @brief Function that translates cuDF time unit to Parquet clock frequency */ constexpr int32_t to_clockrate(type_id timestamp_type_id) { switch (timestamp_type_id) { case type_id::TIMESTAMP_SECONDS: return 1; case type_id::TIMESTAMP_MILLISECONDS: return 1000; case type_id::TIMESTAMP_MICROSECONDS: return 1000000; case type_id::TIMESTAMP_NANOSECONDS: return 1000000000; default: return 0; } } /** * @brief Function that returns the required the number of bits to store a value */ template <typename T = uint8_t> T required_bits(uint32_t max_level) { return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level)); } std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id, type_id timestamp_type_id, parquet::Type physical, int8_t converted, int32_t length) { int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0; int32_t clock_rate = 0; if (column_type_id == type_id::INT8) { type_width = 1; // I32 -> I8 } else if (column_type_id == type_id::INT16) { type_width = 2; // I32 -> I16 } else if (column_type_id == type_id::INT32) { type_width = 4; // str -> hash32 } else if (is_timestamp(data_type{column_type_id})) { clock_rate = to_clockrate(timestamp_type_id); } int8_t converted_type = converted; if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64) { converted_type = parquet::UNKNOWN; // Not converting to float64 } return std::make_tuple(type_width, clock_rate, converted_type); } } // namespace /** * @brief Class for parsing dataset metadata */ struct metadata : public FileMetaData { explicit metadata(datasource *source) { constexpr auto header_len = sizeof(file_header_s); constexpr auto ender_len = sizeof(file_ender_s); const auto len = source->size(); const auto header_buffer = source->get_buffer(0, header_len); const auto header = (const file_header_s *)header_buffer->data(); const auto ender_buffer = source->get_buffer(len - ender_len, ender_len); const auto ender = (const file_ender_s *)ender_buffer->data(); CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source"); CUDF_EXPECTS( header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC, "Corrupted header or footer"); CUDF_EXPECTS(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len), "Incorrect footer length"); const auto buffer = source->get_buffer(len - ender->footer_len - ender_len, ender->footer_len); CompactProtocolReader cp(buffer->data(), ender->footer_len); CUDF_EXPECTS(cp.read(this), "Cannot parse metadata"); CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema"); } inline int get_total_rows() const { return num_rows; } inline int get_num_row_groups() const { return row_groups.size(); } inline int get_num_columns() const { return row_groups[0].columns.size(); } std::string get_column_name(const std::vector<std::string> &path_in_schema) { std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : ""; for (size_t i = 1; i < path_in_schema.size(); i++) { s += "." 
+ path_in_schema[i]; } return s; } std::vector<std::string> get_column_names() { std::vector<std::string> all_names; if (row_groups.size() != 0) { for (const auto &chunk : row_groups[0].columns) { all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema)); } } return all_names; } /** * @brief Extracts the pandas "index_columns" section * * PANDAS adds its own metadata to the key_value section when writing out the * dataframe to a file to aid in exact reconstruction. The JSON-formatted * metadata contains the index column(s) and PANDA-specific datatypes. * * @return comma-separated index column names in quotes */ std::string get_pandas_index() { auto it = std::find_if(key_value_metadata.begin(), key_value_metadata.end(), [](const auto &item) { return item.key == "pandas"; }); if (it != key_value_metadata.end()) { // Captures a list of quoted strings found inside square brackets after `"index_columns":` // Inside quotes supports newlines, brackets, escaped quotes, etc. // One-liner regex: // "index_columns"\s*:\s*\[\s*((?:"(?:|(?:.*?(?![^\\]")).?)[^\\]?",?\s*)*)\] // Documented below. std::regex index_columns_expr{ R"("index_columns"\s*:\s*\[\s*)" // match preamble, opening square bracket, whitespace R"(()" // Open first capturing group R"((?:")" // Open non-capturing group match opening quote R"((?:|(?:.*?(?![^\\]")).?))" // match empty string or anything between quotes R"([^\\]?")" // Match closing non-escaped quote R"(,?\s*)" // Match optional comma and whitespace R"()*)" // Close non-capturing group and repeat 0 or more times R"())" // Close first capturing group R"(\])" // Match closing square brackets }; std::smatch sm; if (std::regex_search(it->value, sm, index_columns_expr)) { return std::move(sm[1].str()); } } return ""; } /** * @brief Extracts the column name(s) used for the row indexes in a dataframe * * @param names List of column names to load, where index column name(s) will be added */ void add_pandas_index_names(std::vector<std::string>& names) { auto str = get_pandas_index(); if (str.length() != 0) { std::regex index_name_expr{R"(\"((?:\\.|[^\"])*)\")"}; std::smatch sm; while (std::regex_search(str, sm, index_name_expr)) { if (sm.size() == 2) { // 2 = whole match, first item if (std::find(names.begin(), names.end(), sm[1].str()) == names.end()) { std::regex esc_quote{R"(\\")"}; names.emplace_back(std::move(std::regex_replace(sm[1].str(), esc_quote, R"(")"))); } } str = sm.suffix(); } } } /** * @brief Filters and reduces down to a selection of row groups * * @param row_group Index of the row group to select * @param max_rowgroup_count Max number of consecutive row groups if > 0 * @param row_start Starting row of the selection * @param row_count Total number of rows selected * * @return List of row group indexes and its starting row */ auto select_row_groups(int row_group, int max_rowgroup_count, int &row_start, int &row_count) { std::vector<std::pair<int, int>> selection; if (row_group != -1) { CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group"); for (int i = 0; i < row_group; ++i) { row_start += row_groups[i].num_rows; } row_count = 0; do { selection.emplace_back(row_group, row_start + row_count); row_count += row_groups[row_group].num_rows; } while (--max_rowgroup_count > 0 && ++row_group < get_num_row_groups()); } else { row_start = std::max(row_start, 0); if (row_count == -1) { row_count = get_total_rows(); } CUDF_EXPECTS(row_count >= 0, "Invalid row count"); CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start"); for (int i = 
0, count = 0; i < (int)row_groups.size(); ++i) { count += row_groups[i].num_rows; if (count > row_start || count == 0) { selection.emplace_back(i, count - row_groups[i].num_rows); } if (count >= (row_start + row_count)) { break; } } } return selection; } /** * @brief Filters and reduces down to a selection of columns * * @param use_names List of column names to select * @param include_index Whether to always include the PANDAS index column(s) * * @return List of column names */ auto select_columns(std::vector<std::string> use_names, bool include_index) { std::vector<std::pair<int, std::string>> selection; const auto names = get_column_names(); if (use_names.empty()) { // No columns specified; include all in the dataset for (const auto &name : names) { selection.emplace_back(selection.size(), name); } } else { // Load subset of columns; include PANDAS index unless excluded if (include_index) { add_pandas_index_names(use_names); } for (const auto &use_name : use_names) { for (size_t i = 0; i < names.size(); ++i) { if (names[i] == use_name) { selection.emplace_back(i, names[i]); break; } } } } return selection; } }; size_t reader::impl::count_page_headers( hostdevice_vector<gpu::ColumnChunkDesc> &chunks, cudaStream_t stream) { size_t total_pages = 0; CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream)); CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream)); CUDA_TRY(cudaMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); for (size_t c = 0; c < chunks.size(); c++) { total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages; } return total_pages; } void reader::impl::decode_page_headers( hostdevice_vector<gpu::ColumnChunkDesc> &chunks, hostdevice_vector<gpu::PageInfo> &pages, cudaStream_t stream) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages; chunks[c].page_info = pages.device_ptr(page_count); page_count += chunks[c].max_num_pages; } CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream)); CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream)); CUDA_TRY(cudaMemcpyAsync(pages.host_ptr(), pages.device_ptr(), pages.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); } rmm::device_buffer reader::impl::decompress_page_data( hostdevice_vector<gpu::ColumnChunkDesc> &chunks, hostdevice_vector<gpu::PageInfo> &pages, cudaStream_t stream) { auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)> &f) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { const auto page_stride = chunks[c].max_num_pages; if (chunks[c].codec == codec) { for (int k = 0; k < page_stride; k++) { f(page_count + k); } } page_count += page_stride; } }; // Brotli scratch memory for decompressing rmm::device_vector<uint8_t> debrotli_scratch; // Count the exact number of compressed pages size_t num_comp_pages = 0; size_t total_decomp_size = 0; std::array<std::pair<parquet::Compression, size_t>, 3> codecs{ std::make_pair(parquet::GZIP, 0), std::make_pair(parquet::SNAPPY, 0), std::make_pair(parquet::BROTLI, 0)}; for (auto &codec : codecs) { for_each_codec_page(codec.first, [&](size_t page) { total_decomp_size += pages[page].uncompressed_page_size; codec.second++; num_comp_pages++; }); 
if (codec.first == parquet::BROTLI && codec.second > 0) { debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second)); } } // Dispatch batches of pages to decompress for each codec rmm::device_buffer decomp_pages(total_decomp_size, stream); hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_comp_pages, stream); hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_comp_pages, stream); size_t decomp_offset = 0; int32_t argc = 0; for (const auto &codec : codecs) { if (codec.second > 0) { int32_t start_pos = argc; for_each_codec_page(codec.first, [&](size_t page) { auto dst_base = static_cast<uint8_t *>(decomp_pages.data()); inflate_in[argc].srcDevice = pages[page].page_data; inflate_in[argc].srcSize = pages[page].compressed_page_size; inflate_in[argc].dstDevice = dst_base + decomp_offset; inflate_in[argc].dstSize = pages[page].uncompressed_page_size; inflate_out[argc].bytes_written = 0; inflate_out[argc].status = static_cast<uint32_t>(-1000); inflate_out[argc].reserved = 0; pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice; decomp_offset += inflate_in[argc].dstSize; argc++; }); CUDA_TRY(cudaMemcpyAsync( inflate_in.device_ptr(start_pos), inflate_in.host_ptr(start_pos), sizeof(decltype(inflate_in)::value_type) * (argc - start_pos), cudaMemcpyHostToDevice, stream)); CUDA_TRY(cudaMemcpyAsync( inflate_out.device_ptr(start_pos), inflate_out.host_ptr(start_pos), sizeof(decltype(inflate_out)::value_type) * (argc - start_pos), cudaMemcpyHostToDevice, stream)); switch (codec.first) { case parquet::GZIP: CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), argc - start_pos, 1, stream)) break; case parquet::SNAPPY: CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), argc - start_pos, stream)); break; case parquet::BROTLI: CUDA_TRY(gpu_debrotli( inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), debrotli_scratch.data().get(), debrotli_scratch.size(), argc - start_pos, stream)); break; default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break; } CUDA_TRY(cudaMemcpyAsync( inflate_out.host_ptr(start_pos), inflate_out.device_ptr(start_pos), sizeof(decltype(inflate_out)::value_type) * (argc - start_pos), cudaMemcpyDeviceToHost, stream)); } } CUDA_TRY(cudaStreamSynchronize(stream)); // Update the page information in device memory with the updated value of // page_data; it now points to the uncompressed data buffer CUDA_TRY(cudaMemcpyAsync(pages.device_ptr(), pages.host_ptr(), pages.memory_size(), cudaMemcpyHostToDevice, stream)); return decomp_pages; } void reader::impl::decode_page_data( hostdevice_vector<gpu::ColumnChunkDesc> &chunks, hostdevice_vector<gpu::PageInfo> &pages, size_t min_row, size_t total_rows, const std::vector<int> &chunk_map, std::vector<column_buffer> &out_buffers, cudaStream_t stream) { auto is_dict_chunk = [](const gpu::ColumnChunkDesc &chunk) { return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0; }; // Count the number of string dictionary entries // NOTE: Assumes first page in the chunk is always the dictionary page size_t total_str_dict_indexes = 0; for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_values; } page_count += chunks[c].max_num_pages; } // Build index for string dictionaries since they can't be indexed // directly due to variable-sized elements rmm::device_vector<gpu::nvstrdesc_s> str_dict_index; if (total_str_dict_indexes > 0) 
{ str_dict_index.resize(total_str_dict_indexes); } // Update chunks with pointers to column data for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs; str_ofs += pages[page_count].num_values; } chunks[c].column_data_base = out_buffers[chunk_map[c]].data(); chunks[c].valid_map_base = out_buffers[chunk_map[c]].null_mask(); page_count += chunks[c].max_num_pages; } CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream)); if (total_str_dict_indexes > 0) { CUDA_TRY(gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), stream)); } CUDA_TRY(gpu::DecodePageData(pages.device_ptr(), pages.size(), chunks.device_ptr(), chunks.size(), total_rows, min_row, stream)); CUDA_TRY(cudaMemcpyAsync(pages.host_ptr(), pages.device_ptr(), pages.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); for (size_t i = 0; i < pages.size(); i++) { if (pages[i].num_rows > 0) { const size_t c = pages[i].chunk_idx; if (c < chunks.size()) { out_buffers[chunk_map[c]].null_count() += pages[i].num_rows - pages[i].valid_count; } } } } reader::impl::impl(std::unique_ptr<datasource> source, reader_options const &options, rmm::mr::device_memory_resource *mr) : _source(std::move(source)), _mr(mr) { // Open and parse the source dataset metadata _metadata = std::make_unique<metadata>(_source.get()); // Select only columns required by the options _selected_columns = _metadata->select_columns( options.columns, options.use_pandas_metadata); // Override output timestamp resolution if requested if (options.timestamp_type.id() != EMPTY) { _timestamp_type = options.timestamp_type; } // Strings may be returned as either string or categorical columns _strings_to_categorical = options.strings_to_categorical; } table_with_metadata reader::impl::read(int skip_rows, int num_rows, int row_group, int max_rowgroup_count, cudaStream_t stream) { std::vector<std::unique_ptr<column>> out_columns; table_metadata out_metadata; // Select only row groups required const auto selected_row_groups = _metadata->select_row_groups(row_group, max_rowgroup_count, skip_rows, num_rows); // Get a list of column data types std::vector<data_type> column_types; if (_metadata->row_groups.size() != 0) { for (const auto &col : _selected_columns) { auto &col_schema = _metadata->schema[_metadata->row_groups[0].columns[col.first].schema_idx]; auto col_type = to_type_id(col_schema.type, col_schema.converted_type, _strings_to_categorical, _timestamp_type.id(), col_schema.decimal_scale); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); column_types.emplace_back(col_type); } } out_columns.reserve(column_types.size()); if (selected_row_groups.size() != 0 && column_types.size() != 0) { // Descriptors for all the chunks that make up the selected columns const auto num_columns = _selected_columns.size(); const auto num_chunks = selected_row_groups.size() * num_columns; hostdevice_vector<gpu::ColumnChunkDesc> chunks(0, num_chunks, stream); // Association between each column chunk and its column std::vector<int> chunk_map(num_chunks); // Tracker for eventually deallocating compressed and uncompressed data std::vector<rmm::device_buffer> page_data(num_chunks); // Initialize column chunk information size_t total_decompressed_size = 0; auto remaining_rows = num_rows; for (const auto &rg : selected_row_groups) { auto row_group = 
_metadata->row_groups[rg.first]; auto row_group_start = rg.second; auto row_group_rows = std::min<int>(remaining_rows, row_group.num_rows); for (size_t i = 0; i < num_columns; ++i) { auto col = _selected_columns[i]; auto &col_meta = row_group.columns[col.first].meta_data; auto &col_schema = _metadata->schema[row_group.columns[col.first].schema_idx]; // Spec requires each row group to contain exactly one chunk for every // column. If there are too many or too few, continue with best effort if (col.second != _metadata->get_column_name(col_meta.path_in_schema)) { std::cerr << "Detected mismatched column chunk" << std::endl; continue; } if (chunks.size() >= chunks.max_size()) { std::cerr << "Detected too many column chunks" << std::endl; continue; } int32_t type_width; int32_t clock_rate; int8_t converted_type; std::tie(type_width, clock_rate, converted_type) = conversion_info( column_types[i].id(), _timestamp_type.id(), col_schema.type, col_schema.converted_type, col_schema.type_length); uint8_t *d_compdata = nullptr; if (col_meta.total_compressed_size != 0) { const auto offset = (col_meta.dictionary_page_offset != 0) ? std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) : col_meta.data_page_offset; auto buffer = _source->get_buffer(offset, col_meta.total_compressed_size); page_data[chunks.size()] = rmm::device_buffer(buffer->data(), buffer->size(), stream); d_compdata = static_cast<uint8_t *>(page_data[chunks.size()].data()); } chunks.insert(gpu::ColumnChunkDesc( col_meta.total_compressed_size, d_compdata, col_meta.num_values, col_schema.type, type_width, row_group_start, row_group_rows, col_schema.max_definition_level, col_schema.max_repetition_level, required_bits(col_schema.max_definition_level), required_bits(col_schema.max_repetition_level), col_meta.codec, converted_type, col_schema.decimal_scale, clock_rate)); // Map each column chunk to its column index chunk_map[chunks.size() - 1] = i; if (col_meta.codec != Compression::UNCOMPRESSED) { total_decompressed_size += col_meta.total_uncompressed_size; } } remaining_rows -= row_group.num_rows; } assert(remaining_rows <= 0); // Process dataset chunk pages into output columns const auto total_pages = count_page_headers(chunks, stream); if (total_pages > 0) { hostdevice_vector<gpu::PageInfo> pages(total_pages, total_pages, stream); rmm::device_buffer decomp_page_data; decode_page_headers(chunks, pages, stream); if (total_decompressed_size > 0) { decomp_page_data = decompress_page_data(chunks, pages, stream); // Free compressed data for (size_t c = 0; c < chunks.size(); c++) { if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { page_data[c].resize(0); page_data[c].shrink_to_fit(); } } } std::vector<column_buffer> out_buffers; out_buffers.reserve(column_types.size()); for (size_t i = 0; i < column_types.size(); ++i) { auto col = _selected_columns[i]; auto &col_schema = _metadata->schema[_metadata->row_groups[selected_row_groups[0].first] .columns[col.first] .schema_idx]; bool is_nullable = (col_schema.max_definition_level != 0); out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr); } decode_page_data(chunks, pages, skip_rows, num_rows, chunk_map, out_buffers, stream); for (size_t i = 0; i < column_types.size(); ++i) { out_columns.emplace_back(make_column(column_types[i], num_rows, out_buffers[i], stream, _mr)); } } } // Create empty columns as needed for (size_t i = out_columns.size(); i < column_types.size(); ++i) { out_columns.emplace_back(make_empty_column(column_types[i])); } // Return 
column names (must match order of returned columns) out_metadata.column_names.resize(_selected_columns.size()); for (size_t i = 0; i < _selected_columns.size(); i++) { out_metadata.column_names[i] = _selected_columns[i].second; } // Return user metadata for (const auto& kv : _metadata->key_value_metadata) { out_metadata.user_data.insert({kv.key, kv.value}); } return { std::make_unique<table>(std::move(out_columns)), std::move(out_metadata) }; } // Forward to implementation reader::reader(std::string filepath, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(filepath), options, mr)) { } // Forward to implementation reader::reader(const char *buffer, size_t length, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(buffer, length), options, mr)) {} // Forward to implementation reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(file), options, mr)) {} // Destructor within this translation unit reader::~reader() = default; // Forward to implementation table_with_metadata reader::read_all(cudaStream_t stream) { return _impl->read(0, -1, -1, -1, stream); } // Forward to implementation table_with_metadata reader::read_row_group(size_type row_group, size_type row_group_count, cudaStream_t stream) { return _impl->read(0, -1, row_group, row_group_count, stream); } // Forward to implementation table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, cudaStream_t stream) { return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, -1, -1, stream); } } // namespace parquet } // namespace detail } // namespace io } // namespace experimental } // namespace cudf
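
// Usage sketch (an addition, not part of the original file pair): every public
// entry point above funnels into impl::read(skip_rows, num_rows, row_group,
// row_group_count, stream), with -1 acting as "no restriction". A minimal call
// path, assuming the surrounding cudf/rmm headers, that reader_options lives
// alongside the reader in the detail::parquet namespace, an options object
// built elsewhere, and the default stream; "example.parquet" is illustrative.
namespace pq = cudf::experimental::io::detail::parquet;

auto read_first_thousand_rows(pq::reader_options const &opts,
                              rmm::mr::device_memory_resource *mr)
{
  // The other constructors accept a (buffer, length) pair or an Arrow file source.
  pq::reader rdr("example.parquet", opts, mr);
  // Forwarded as impl->read(0, 1000, -1, -1, stream): no row-group filter applied.
  return rdr.read_rows(0, 1000, 0);
}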
f0ee4f2f507a7fd52b0c61e412852362b31c4bf8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int posx = blockDim.x*blockIdx.x + threadIdx.x; int posy = blockDim.y*blockIdx.y + threadIdx.y; if (posx < numCols){ if (posy < numRows){ uchar4 rgba = rgbaImage[posy * numCols + posx]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[posy * numCols + posx] = channelSum; } } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int gridSizex = 1 + (numCols / 32); int gridSizey = 1 + (numRows / 32); const dim3 blockSize(32, 32, 1); //TODO const dim3 gridSize(gridSizex, gridSizey, 1); //TODO rgba_to_greyscale << <gridSize, blockSize >> >(d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
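
// Grid-sizing sketch (an addition, not part of the original file pair): the
// launcher above computes gridSize as 1 + n/32, which rounds up but launches
// one extra, fully idle block whenever n is an exact multiple of 32. The usual
// ceiling-division form avoids that; either way the bounds check inside the
// kernel keeps out-of-range threads from writing.
static inline unsigned int div_round_up(size_t n, unsigned int block)
{
  return static_cast<unsigned int>((n + block - 1) / block);
}
// e.g. const dim3 gridSize(div_round_up(numCols, 32), div_round_up(numRows, 32), 1);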
f0ee4f2f507a7fd52b0c61e412852362b31c4bf8.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int posx = blockDim.x*blockIdx.x + threadIdx.x; int posy = blockDim.y*blockIdx.y + threadIdx.y; if (posx < numCols){ if (posy < numRows){ uchar4 rgba = rgbaImage[posy * numCols + posx]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[posy * numCols + posx] = channelSum; } } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int gridSizex = 1 + (numCols / 32); int gridSizey = 1 + (numRows / 32); const dim3 blockSize(32, 32, 1); //TODO const dim3 gridSize(gridSizex, gridSizey, 1); //TODO rgba_to_greyscale << <gridSize, blockSize >> >(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
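
// Error-checking sketch (an addition, not part of the original file pair): the
// host code above synchronizes and then queries the last error once. Splitting
// the check into one right after the launch (catches invalid launch
// configurations immediately) and a checked synchronize (surfaces errors from
// the kernel's execution) can make failures easier to attribute.
inline void launch_and_check(const uchar4 *d_rgba, unsigned char *d_grey,
                             int numRows, int numCols, dim3 grid, dim3 block)
{
  rgba_to_greyscale<<<grid, block>>>(d_rgba, d_grey, numRows, numCols);
  checkCudaErrors(cudaGetLastError());      // launch / configuration errors
  checkCudaErrors(cudaDeviceSynchronize()); // errors from kernel execution
}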
d104374c4a563ff588d6ce80cee52c191658dca5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication and is exactly the same as * Chapter 7 of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * CUBLAS provides high-performance matrix multiplication. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil_inline.h> // includes, kernels #include <matrixMul_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int, bool); void printDiff(float*, float*, int, int); extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest(argc, argv); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// runTest(int argc, char** argv) { if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); // set seed for rand() srand(2006); // allocate host memory for matrices A and B unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = WB * HB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*) malloc(mem_size_B); // initialize host memory randomInit(h_A, size_A, false); randomInit(h_B, size_B, false); // allocate device memory float* d_A; cutilSafeCall(hipMalloc((void**) &d_A, mem_size_A)); float* d_B; cutilSafeCall(hipMalloc((void**) &d_B, mem_size_B)); // copy host memory to device cutilSafeCall(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) ); cutilSafeCall(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) ); // allocate device memory for result unsigned int size_C = WC * HC; unsigned int mem_size_C = sizeof(float) * size_C; float* d_C; cutilSafeCall(hipMalloc((void**) &d_C, mem_size_C)); // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); // create and start timer unsigned int timer = 0; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutStartTimer(timer)); // setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(WC / threads.x, HC / threads.y); // execute the kernel hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); // check if kernel execution generated and error cutilCheckMsg("Kernel execution failed"); // copy result from device to host cutilSafeCall(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) ); // stop and destroy timer cutilCheckError(cutStopTimer(timer)); printf("Processing time: %f (ms) \n", cutGetTimerValue(timer)); cutilCheckError(cutDeleteTimer(timer)); /* // compute reference solution in CPU float* reference = (float*) malloc(mem_size_C); computeGold(reference, h_A, h_B, HA, WA, WB); // check result CUTBoolean res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); if (res!=1) printDiff(reference, h_C, WC, HC); */ // clean up memory free(h_A); free(h_B); free(h_C); free(reference); cutilSafeCall(hipFree(d_A)); cutilSafeCall(hipFree(d_B)); cutilSafeCall(hipFree(d_C)); hipDeviceReset(); } // Allocates a matrix with random float entries. void randomInit(float* data, int size,bool randVal) { for (int i = 0; i < size; ++i) if(randVal) { data[i] = rand() / (float)RAND_MAX; }else data[i]=1; } void printDiff(float *data1, float *data2, int width, int height) { int i,j,k; int error_count=0; for (j=0; j<height; j++) { for (i=0; i<width; i++) { k = j*width+i; if (data1[k] != data2[k]) { printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i,j, data1[k], data2[k]); error_count++; } } } printf(" nTotal Errors = %d n", error_count); }
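
// Launch-syntax note (an addition, not part of the original file pair): this
// hipified host file expresses the CUDA triple-chevron launch of the .cu
// version below through the hipLaunchKernelGGL macro. With shared-memory bytes
// and stream written out, the general correspondence is
//
//   kernel<<<gridDim, blockDim, sharedBytes, stream>>>(args...);
//   hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedBytes, stream, args...);
//
// so the call above launches matrixMul<<<grid, threads>>>(d_C, d_A, d_B, WA, WB)
// with zero dynamic shared memory on the default (null) stream. A minimal wrapper:
static inline void launch_matrixMul(dim3 grid, dim3 threads,
                                    float *d_C, float *d_A, float *d_B)
{
    hipLaunchKernelGGL(matrixMul, grid, threads, 0 /*sharedBytes*/, 0 /*stream*/,
                       d_C, d_A, d_B, WA, WB);
    // equivalent CUDA form: matrixMul<<<grid, threads>>>(d_C, d_A, d_B, WA, WB);
}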
d104374c4a563ff588d6ce80cee52c191658dca5.cu
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ /* Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication and is exactly the same as * Chapter 7 of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * CUBLAS provides high-performance matrix multiplication. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil_inline.h> // includes, kernels #include <matrixMul_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int, bool); void printDiff(float*, float*, int, int); extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest(argc, argv); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// runTest(int argc, char** argv) { if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); // set seed for rand() srand(2006); // allocate host memory for matrices A and B unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = WB * HB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*) malloc(mem_size_B); // initialize host memory randomInit(h_A, size_A, false); randomInit(h_B, size_B, false); // allocate device memory float* d_A; cutilSafeCall(cudaMalloc((void**) &d_A, mem_size_A)); float* d_B; cutilSafeCall(cudaMalloc((void**) &d_B, mem_size_B)); // copy host memory to device cutilSafeCall(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) ); cutilSafeCall(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) ); // allocate device memory for result unsigned int size_C = WC * HC; unsigned int mem_size_C = sizeof(float) * size_C; float* d_C; cutilSafeCall(cudaMalloc((void**) &d_C, mem_size_C)); // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); // create and start timer unsigned int timer = 0; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutStartTimer(timer)); // setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(WC / threads.x, HC / threads.y); // execute the kernel matrixMul<<< grid, threads >>>(d_C, d_A, d_B, WA, WB); // check if kernel execution generated and error cutilCheckMsg("Kernel execution failed"); // copy result from device to host cutilSafeCall(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) ); // stop and destroy timer cutilCheckError(cutStopTimer(timer)); printf("Processing time: %f (ms) \n", cutGetTimerValue(timer)); cutilCheckError(cutDeleteTimer(timer)); /* // compute reference solution in CPU float* reference = (float*) malloc(mem_size_C); computeGold(reference, h_A, h_B, HA, WA, WB); // check result CUTBoolean res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); if (res!=1) printDiff(reference, h_C, WC, HC); */ // clean up memory free(h_A); free(h_B); free(h_C); free(reference); cutilSafeCall(cudaFree(d_A)); cutilSafeCall(cudaFree(d_B)); cutilSafeCall(cudaFree(d_C)); cudaThreadExit(); } // Allocates a matrix with random float entries. void randomInit(float* data, int size,bool randVal) { for (int i = 0; i < size; ++i) if(randVal) { data[i] = rand() / (float)RAND_MAX; }else data[i]=1; } void printDiff(float *data1, float *data2, int width, int height) { int i,j,k; int error_count=0; for (j=0; j<height; j++) { for (i=0; i<width; i++) { k = j*width+i; if (data1[k] != data2[k]) { printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i,j, data1[k], data2[k]); error_count++; } } } printf(" nTotal Errors = %d n", error_count); }
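
// Verification sketch (an addition, not part of the original file pair): in the
// host code above, `reference` is declared only inside the commented-out
// verification block, yet free(reference) is still active, so the file does not
// compile as written. A self-contained variant keeps allocation, comparison and
// release together, using computeGold and cutCompareL2fe as declared above.
static void verify_result(float *h_A, float *h_B, float *h_C, unsigned int mem_size_C)
{
    float *reference = (float *) malloc(mem_size_C);
    computeGold(reference, h_A, h_B, HA, WA, WB);          // CPU reference solution
    CUTBoolean res = cutCompareL2fe(reference, h_C, WC * HC, 1e-6f);
    printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED");
    if (res != 1) printDiff(reference, h_C, WC, HC);
    free(reference);
}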
7026e0f10a8ae2cc57bc8eef769cbe8188fa5381.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define PRECISION_z #define COMPLEX #define BLOCKSIZE 8 #define WARP_SIZE 8 #define WRP 8 #define WRQ 4 #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #if (TORCH_HIP_VERSION >= 7000) __device__ void ztrsv_lower_8kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB[ 2 ]; magmaDoubleComplex rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (k = 0; k < N; k++) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; magmaDoubleComplex top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn > k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void ztrsv_upper_8kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB[ 2 ]; magmaDoubleComplex rA[ 2 ]; int n; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (int k = N-1; k > -1; k--) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; magmaDoubleComplex top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void ztrsv_lower_8kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 1; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 2; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __global__ void ztrsv_lower_8kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: ztrsv_lower_8kernel_1( dA, dB ); break; case 2: ztrsv_lower_8kernel_2( dA, dB ); break; case 3: ztrsv_lower_8kernel_3( dA, dB ); break; case 4: ztrsv_lower_8kernel_4( dA, dB ); break; case 5: ztrsv_lower_8kernel_5( dA, dB ); break; case 6: ztrsv_lower_8kernel_6( dA, dB ); break; case 7: ztrsv_lower_8kernel_7( dA, dB ); break; case 8: ztrsv_lower_8kernel_8( dA, dB ); break; default: ztrsv_lower_8kernel_general( dA, dB, sizes ); break; } } } __device__ void ztrsv_upper_8kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 1-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 2-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 3-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void ztrsv_upper_8kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: ztrsv_upper_8kernel_1( dA, dB ); break; case 2: ztrsv_upper_8kernel_2( dA, dB ); break; case 3: ztrsv_upper_8kernel_3( dA, dB ); break; case 4: ztrsv_upper_8kernel_4( dA, dB ); break; case 5: ztrsv_upper_8kernel_5( dA, dB ); break; case 6: ztrsv_upper_8kernel_6( dA, dB ); break; case 7: ztrsv_upper_8kernel_7( dA, dB ); break; case 8: ztrsv_upper_8kernel_8( dA, dB ); break; default: ztrsv_upper_8kernel_general( dA, dB, sizes ); break; } } } // initialize arrays with zero __global__ void magma_zgpumemzero_8kernel( magmaDoubleComplex * d, int n, int dim_x, int dim_y ) { int i = blockIdx.y * gridDim.x + blockIdx.x; int idx = threadIdx.x; if( i >= n ){ return; } if( idx >= dim_x ){ return; } for( int j=0; j<dim_y; j++) d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } __global__ void magma_zlocations_lower_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zlocations_trunc_lower_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 8 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ]; } }// kernel __global__ void magma_zlocations_upper_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zlocations_trunc_upper_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = 
col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 8 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zfilltrisystems_8kernel( magma_int_t offset, magma_int_t limit, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset; int ii = (blockDim.x * blockIdx.x + threadIdx.x); if ( ii>=limit ){ return; } //if ( i<offset ){ // return; //} for( int j=0; j<sizes[ i ]; j++ ){// no need for first int k = row[ locations[ j+i*WARP_SIZE ] ]; int l = i*WARP_SIZE; int idx = 0; while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == col[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ] = val[ k ]; k++; l++; idx++; } else if( col[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] // printf("increment l\n"); l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } }// kernel __global__ void magma_zbackinsert_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; int end = sizes[j]; if( j >= n ){ return; } if ( i>=end ){ return; } val[row[j]+i] = rhs[j*WARP_SIZE+i]; }// kernel #endif /** Purpose ------- This routine is designet to combine all kernels into one. Arguments --------- @param[in] uplotype magma_uplo_t lower or upper triangular @param[in] transtype magma_trans_t possibility for transposed matrix @param[in] diagtype magma_diag_t unit diagonal or not @param[in] L magma_z_matrix triangular factor for which the ISAI matrix is computed. Col-Major CSR storage. @param[in,out] M magma_z_matrix* SPAI preconditioner CSR col-major @param[out] sizes magma_int_t* Number of Elements that are replaced. @param[out] locations magma_int_t* Array indicating the locations. @param[out] trisystems magmaDoubleComplex* trisystems @param[out] rhs magmaDoubleComplex* right-hand sides @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zisaigenerator_8_gpu( magma_uplo_t uplotype, magma_trans_t transtype, magma_diag_t diagtype, magma_z_matrix L, magma_z_matrix *M, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs, magma_queue_t queue ) { magma_int_t info = 0; #if (TORCH_HIP_VERSION >= 7000) magma_int_t arch = magma_getdevice_arch(); hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); // routine 1 int r1bs1 = WARP_SIZE; int r1bs2 = 1; int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 ); int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535); int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 ); dim3 r1block( r1bs1, r1bs2, 1 ); dim3 r1grid( r1dg1, r1dg2, r1dg3 ); int r2bs1 = WARP_SIZE; int r2bs2 = 1; int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 ); int r2dg2 = 1; int r2dg3 = 1; dim3 r2block( r2bs1, r2bs2, 1 ); dim3 r2grid( r2dg1, r2dg2, r2dg3 ); int r3bs1 = WARP_SIZE; int r3bs2 = 1; int r3dg1 = magma_ceildiv( 32000, r2bs1 ); int r3dg2 = 1; int r3dg3 = 1; dim3 r3block( r3bs1, r3bs2, 1 ); dim3 r3grid( r3dg1, r3dg2, r3dg3 ); int recursive = magma_ceildiv( M->num_rows, 32000 ); if (arch >= 300) { hipLaunchKernelGGL(( magma_zgpumemzero_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , rhs, L.num_rows, WARP_SIZE, 1); if (uplotype == MagmaLower) { hipLaunchKernelGGL(( magma_zlocations_lower_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } else { hipLaunchKernelGGL(( magma_zlocations_upper_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } // chunk it recursively into batches of 800 for( int z=0; z<recursive; z++ ){ int limit = min(32000, L.num_rows-32000*z); hipLaunchKernelGGL(( magma_zgpumemzero_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, limit, WARP_SIZE, WARP_SIZE ); hipLaunchKernelGGL(( magma_zfilltrisystems_8kernel), dim3(r3grid), dim3(r3block), 0, queue->cuda_stream() , 32000*z, limit, L.drow, L.dcol, L.dval, sizes, locations, trisystems, rhs ); // routine 2 if (uplotype == MagmaLower) { hipLaunchKernelGGL(( ztrsv_lower_8kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, rhs+32000*8*z, sizes+32000*z, limit ); } else { hipLaunchKernelGGL(( ztrsv_upper_8kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, rhs+32000*8*z, sizes+32000*z, limit ); } } // routine 3 hipLaunchKernelGGL(( magma_zbackinsert_8kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, rhs ); } else { info = MAGMA_ERR_NOT_SUPPORTED; } #else // CUDA < 7000 printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" ); info = MAGMA_ERR_NOT_SUPPORTED; #endif return info; }
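
// Warp-shuffle sketch (an addition, not part of the original file pair): the
// triangular-solve kernels above broadcast the freshly solved entry with the
// pre-CUDA-9 __shfl intrinsic, and only in the REAL-precision instantiation
// (see the surrounding #if guards), where magmaDoubleComplex reduces to a
// plain double. On the CUDA side, toolkits 9.0 and later spell this
// __shfl_sync and require an explicit mask of participating lanes; for the
// 8-thread systems used here that mask covers lanes 0..7.
#if defined(__CUDACC__) && (__CUDACC_VER_MAJOR__ >= 9)
__device__ inline double broadcast_solved_entry(double rB, int k)
{
    const unsigned mask = 0x000000ffu;            // lanes 0..7 participate
    return __shfl_sync(mask, rB, k % WARP_SIZE);  // same source lane as __shfl(rB, k % WARP_SIZE)
}
#endif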
7026e0f10a8ae2cc57bc8eef769cbe8188fa5381.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define PRECISION_z #define COMPLEX #define BLOCKSIZE 8 #define WARP_SIZE 8 #define WRP 8 #define WRQ 4 #include <cuda.h> // for CUDA_VERSION #if (CUDA_VERSION >= 7000) __device__ void ztrsv_lower_8kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB[ 2 ]; magmaDoubleComplex rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (k = 0; k < N; k++) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; magmaDoubleComplex top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn > k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void ztrsv_upper_8kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB[ 2 ]; magmaDoubleComplex rA[ 2 ]; int n; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (int k = N-1; k > -1; k--) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; magmaDoubleComplex top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void ztrsv_lower_8kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 1; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 2; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_8kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __global__ void ztrsv_lower_8kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: ztrsv_lower_8kernel_1( dA, dB ); break; case 2: ztrsv_lower_8kernel_2( dA, dB ); break; case 3: ztrsv_lower_8kernel_3( dA, dB ); break; case 4: ztrsv_lower_8kernel_4( dA, dB ); break; case 5: ztrsv_lower_8kernel_5( dA, dB ); break; case 6: ztrsv_lower_8kernel_6( dA, dB ); break; case 7: ztrsv_lower_8kernel_7( dA, dB ); break; case 8: ztrsv_lower_8kernel_8( dA, dB ); break; default: ztrsv_lower_8kernel_general( dA, dB, sizes ); break; } } } __device__ void ztrsv_upper_8kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 1-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 2-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 3-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_8kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void ztrsv_upper_8kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: ztrsv_upper_8kernel_1( dA, dB ); break; case 2: ztrsv_upper_8kernel_2( dA, dB ); break; case 3: ztrsv_upper_8kernel_3( dA, dB ); break; case 4: ztrsv_upper_8kernel_4( dA, dB ); break; case 5: ztrsv_upper_8kernel_5( dA, dB ); break; case 6: ztrsv_upper_8kernel_6( dA, dB ); break; case 7: ztrsv_upper_8kernel_7( dA, dB ); break; case 8: ztrsv_upper_8kernel_8( dA, dB ); break; default: ztrsv_upper_8kernel_general( dA, dB, sizes ); break; } } } // initialize arrays with zero __global__ void magma_zgpumemzero_8kernel( magmaDoubleComplex * d, int n, int dim_x, int dim_y ) { int i = blockIdx.y * gridDim.x + blockIdx.x; int idx = threadIdx.x; if( i >= n ){ return; } if( idx >= dim_x ){ return; } for( int j=0; j<dim_y; j++) d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } __global__ void magma_zlocations_lower_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zlocations_trunc_lower_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 8 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ]; } }// kernel __global__ void magma_zlocations_upper_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zlocations_trunc_upper_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = 
col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 8 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zfilltrisystems_8kernel( magma_int_t offset, magma_int_t limit, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset; int ii = (blockDim.x * blockIdx.x + threadIdx.x); if ( ii>=limit ){ return; } //if ( i<offset ){ // return; //} for( int j=0; j<sizes[ i ]; j++ ){// no need for first int k = row[ locations[ j+i*WARP_SIZE ] ]; int l = i*WARP_SIZE; int idx = 0; while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == col[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ] = val[ k ]; k++; l++; idx++; } else if( col[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] // printf("increment l\n"); l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } }// kernel __global__ void magma_zbackinsert_8kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; int end = sizes[j]; if( j >= n ){ return; } if ( i>=end ){ return; } val[row[j]+i] = rhs[j*WARP_SIZE+i]; }// kernel #endif /** Purpose ------- This routine is designet to combine all kernels into one. Arguments --------- @param[in] uplotype magma_uplo_t lower or upper triangular @param[in] transtype magma_trans_t possibility for transposed matrix @param[in] diagtype magma_diag_t unit diagonal or not @param[in] L magma_z_matrix triangular factor for which the ISAI matrix is computed. Col-Major CSR storage. @param[in,out] M magma_z_matrix* SPAI preconditioner CSR col-major @param[out] sizes magma_int_t* Number of Elements that are replaced. @param[out] locations magma_int_t* Array indicating the locations. @param[out] trisystems magmaDoubleComplex* trisystems @param[out] rhs magmaDoubleComplex* right-hand sides @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zisaigenerator_8_gpu( magma_uplo_t uplotype, magma_trans_t transtype, magma_diag_t diagtype, magma_z_matrix L, magma_z_matrix *M, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs, magma_queue_t queue ) { magma_int_t info = 0; #if (CUDA_VERSION >= 7000) magma_int_t arch = magma_getdevice_arch(); cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); // routine 1 int r1bs1 = WARP_SIZE; int r1bs2 = 1; int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 ); int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535); int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 ); dim3 r1block( r1bs1, r1bs2, 1 ); dim3 r1grid( r1dg1, r1dg2, r1dg3 ); int r2bs1 = WARP_SIZE; int r2bs2 = 1; int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 ); int r2dg2 = 1; int r2dg3 = 1; dim3 r2block( r2bs1, r2bs2, 1 ); dim3 r2grid( r2dg1, r2dg2, r2dg3 ); int r3bs1 = WARP_SIZE; int r3bs2 = 1; int r3dg1 = magma_ceildiv( 32000, r2bs1 ); int r3dg2 = 1; int r3dg3 = 1; dim3 r3block( r3bs1, r3bs2, 1 ); dim3 r3grid( r3dg1, r3dg2, r3dg3 ); int recursive = magma_ceildiv( M->num_rows, 32000 ); if (arch >= 300) { magma_zgpumemzero_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( rhs, L.num_rows, WARP_SIZE, 1); if (uplotype == MagmaLower) { magma_zlocations_lower_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } else { magma_zlocations_upper_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } // chunk it recursively into batches of 800 for( int z=0; z<recursive; z++ ){ int limit = min(32000, L.num_rows-32000*z); magma_zgpumemzero_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, limit, WARP_SIZE, WARP_SIZE ); magma_zfilltrisystems_8kernel<<< r3grid, r3block, 0, queue->cuda_stream() >>>( 32000*z, limit, L.drow, L.dcol, L.dval, sizes, locations, trisystems, rhs ); // routine 2 if (uplotype == MagmaLower) { ztrsv_lower_8kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, rhs+32000*8*z, sizes+32000*z, limit ); } else { ztrsv_upper_8kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, rhs+32000*8*z, sizes+32000*z, limit ); } } // routine 3 magma_zbackinsert_8kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, rhs ); } else { info = MAGMA_ERR_NOT_SUPPORTED; } #else // CUDA < 7000 printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" ); info = MAGMA_ERR_NOT_SUPPORTED; #endif return info; }
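The ztrsv_lower_8kernel_* variants above all evaluate the same forward-substitution recurrence, one warp lane per row, with the freshly solved entry broadcast through __shfl. The following is a minimal host-side sketch of that recurrence (not MAGMA code, and using plain double instead of magmaDoubleComplex) for one dense column-major lower-triangular block, included only to make the per-block algorithm explicit.

// Solve L*x = b for one dense lower-triangular n x n block stored
// column-major (entry (row, col) at col*n + row), overwriting b with x.
// Step k mirrors the kernels above: lane k divides by the diagonal,
// the result is broadcast (via __shfl on the GPU), and lanes with
// idn > k subtract top * rA.
static void trsv_lower_block(const double* L, double* b, int n)
{
    for (int k = 0; k < n; ++k) {           // one step per column, like the k loop
        b[k] /= L[k * n + k];               // rB /= rA on the diagonal lane
        const double top = b[k];            // value broadcast to the other lanes
        for (int i = k + 1; i < n; ++i) {   // lanes with idn > k
            b[i] -= top * L[k * n + i];     // rB -= top * rA
        }
    }
}

The upper-triangular variants run the same update with k counting down and the subtraction applied to lanes with idn < k.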
1203999d9d4dd136cbe836b08b197eb0b169e6d6.hip
// !!! This is a file automatically generated by hipify!!! #pragma once #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/device_functions.h> #include <iostream> #include <list> #include <vector> #include "../Graph IO Utilities/GraphReader.h" #include"cudaAnalyzer.h" #include "stack.h" #include "PathsContainer.h" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __device__ void visitVertex(int toVisit, int destination, Stack* path, int* visitedVertices, PathsContainer* cycles, int* matrix, int count) { path->push(toVisit); visitedVertices[toVisit] = visited; for (int i = 0; i < count; i++) { if (matrix[toVisit * count + i] == connected) { if (i == destination) { // Found the destination path->push(destination); int* pathToAdd = path->makeCopy(); cycles->addPath(pathToAdd, path->count); free(pathToAdd); path->pop(); } else if (visitedVertices[i] == notVisited) { // Look futher visitVertex(i, destination, path, visitedVertices, cycles, matrix, count); } } } path->pop(); visitedVertices[toVisit] = notVisited; } __global__ void beginVisting(PathsContainer* d_outputs, int* d_matrix, config_t d_config) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int count = d_config.matrixSize; if (tid < d_config.matrixSize) { // PreAnalysis data preparations int* visitedVerticles = (int*)malloc(count * sizeof(int)); for (int i = 0; i < count; i++) { visitedVerticles[i] = notVisited; } Stack path; new(&d_outputs[tid]) PathsContainer; // Analysis visitVertex(tid, tid, &path, visitedVerticles, &d_outputs[tid], d_matrix, count); free(visitedVerticles); } } __global__ void getOutputSize(PathsContainer* d_outputs, int* outputSize, config_t d_config) { *outputSize = 0; for (int i = 0; i < d_config.matrixSize; i++) { *outputSize += d_outputs[i].count; } } __global__ void transferOutputs(int* cycles, PathsContainer* d_outputs, config_t config) { int offset = 0; for (int i = 0; i < config.matrixSize; i++) { memcpy(&cycles[offset], d_outputs[i].paths, d_outputs[i].count * sizeof(*cycles)); offset += d_outputs[i].count; } } __global__ void freePathsContainers(PathsContainer* d_outputs, int count) { for (int i = 0; i < count; i++) { if (&d_outputs[i] != NULL) { delete &d_outputs[i]; } } } std::list<std::vector<int>> convertToList(int* mergedCycles, int count) { std::list<std::vector<int>> cycles; std::vector<int> *tmp = new std::vector<int>(); int head = mergedCycles[0]; tmp->push_back(head); for (int i = 1; i < count; i++) { tmp->push_back(mergedCycles[i]); if (head == mergedCycles[i]) { cycles.push_back(*tmp); tmp = new std::vector<int>(); if (i + 1 < count) { head = mergedCycles[i + 1]; tmp->push_back(head); i++; } } } return cycles; } std::list<std::vector<int>> findCycles(int* matrix, config_t config) { // Data preparation PathsContainer* d_outputs; int* d_matrix; int matrixSize = config.matrixSize * config.matrixSize * sizeof(*matrix); gpuErrchk(hipMalloc(&d_matrix, matrixSize)); gpuErrchk(hipMemcpy(d_matrix, matrix, matrixSize, hipMemcpyHostToDevice)); gpuErrchk(hipMalloc(&d_outputs, config.matrixSize * sizeof(PathsContainer))); // Calculations beginVisting << <(config.matrixSize + 255) / 256, 256>> > (d_outputs, d_matrix, config); gpuErrchk(hipPeekAtLastError()); 
gpuErrchk(hipDeviceSynchronize()); // Data size evaluation int* d_outputSize; gpuErrchk(hipMalloc(&d_outputSize, sizeof(int))); getOutputSize << <1, 1 >> > (d_outputs, d_outputSize, config); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); int* outputSize = (int*)malloc(sizeof(int)); gpuErrchk(hipMemcpy(outputSize, d_outputSize, sizeof(*outputSize), hipMemcpyDeviceToHost)); // Data transfer int *d_cycles; gpuErrchk(hipMalloc(&d_cycles, *outputSize * sizeof(*matrix))); transferOutputs << <1, 1 >> > (d_cycles, d_outputs, config); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); int* mergedCycles = (int*)malloc(*outputSize * sizeof(*matrix)); gpuErrchk(hipMemcpy(mergedCycles, d_cycles, *outputSize * sizeof(*matrix), hipMemcpyDeviceToHost)); // Conversion std::list<std::vector<int>> cycles; cycles = convertToList(mergedCycles, *outputSize); // Clean up gpuErrchk(hipFree(d_matrix)); gpuErrchk(hipFree(d_cycles)); freePathsContainers << <1, 1 >> > (d_outputs, config.matrixSize); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipFree(d_outputSize)); gpuErrchk(hipFree(d_outputs)); free(mergedCycles); free(outputSize); return cycles; }
1203999d9d4dd136cbe836b08b197eb0b169e6d6.cu
#pragma once #include <cuda.h> #include <cuda_runtime_api.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <device_functions.h> #include <iostream> #include <list> #include <vector> #include "../Graph IO Utilities/GraphReader.h" #include"cudaAnalyzer.h" #include "stack.h" #include "PathsContainer.h" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __device__ void visitVertex(int toVisit, int destination, Stack* path, int* visitedVertices, PathsContainer* cycles, int* matrix, int count) { path->push(toVisit); visitedVertices[toVisit] = visited; for (int i = 0; i < count; i++) { if (matrix[toVisit * count + i] == connected) { if (i == destination) { // Found the destination path->push(destination); int* pathToAdd = path->makeCopy(); cycles->addPath(pathToAdd, path->count); free(pathToAdd); path->pop(); } else if (visitedVertices[i] == notVisited) { // Look futher visitVertex(i, destination, path, visitedVertices, cycles, matrix, count); } } } path->pop(); visitedVertices[toVisit] = notVisited; } __global__ void beginVisting(PathsContainer* d_outputs, int* d_matrix, config_t d_config) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int count = d_config.matrixSize; if (tid < d_config.matrixSize) { // PreAnalysis data preparations int* visitedVerticles = (int*)malloc(count * sizeof(int)); for (int i = 0; i < count; i++) { visitedVerticles[i] = notVisited; } Stack path; new(&d_outputs[tid]) PathsContainer; // Analysis visitVertex(tid, tid, &path, visitedVerticles, &d_outputs[tid], d_matrix, count); free(visitedVerticles); } } __global__ void getOutputSize(PathsContainer* d_outputs, int* outputSize, config_t d_config) { *outputSize = 0; for (int i = 0; i < d_config.matrixSize; i++) { *outputSize += d_outputs[i].count; } } __global__ void transferOutputs(int* cycles, PathsContainer* d_outputs, config_t config) { int offset = 0; for (int i = 0; i < config.matrixSize; i++) { memcpy(&cycles[offset], d_outputs[i].paths, d_outputs[i].count * sizeof(*cycles)); offset += d_outputs[i].count; } } __global__ void freePathsContainers(PathsContainer* d_outputs, int count) { for (int i = 0; i < count; i++) { if (&d_outputs[i] != NULL) { delete &d_outputs[i]; } } } std::list<std::vector<int>> convertToList(int* mergedCycles, int count) { std::list<std::vector<int>> cycles; std::vector<int> *tmp = new std::vector<int>(); int head = mergedCycles[0]; tmp->push_back(head); for (int i = 1; i < count; i++) { tmp->push_back(mergedCycles[i]); if (head == mergedCycles[i]) { cycles.push_back(*tmp); tmp = new std::vector<int>(); if (i + 1 < count) { head = mergedCycles[i + 1]; tmp->push_back(head); i++; } } } return cycles; } std::list<std::vector<int>> findCycles(int* matrix, config_t config) { // Data preparation PathsContainer* d_outputs; int* d_matrix; int matrixSize = config.matrixSize * config.matrixSize * sizeof(*matrix); gpuErrchk(cudaMalloc(&d_matrix, matrixSize)); gpuErrchk(cudaMemcpy(d_matrix, matrix, matrixSize, cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_outputs, config.matrixSize * sizeof(PathsContainer))); // Calculations beginVisting << <(config.matrixSize + 255) / 256, 256>> > (d_outputs, d_matrix, config); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // Data size evaluation int* d_outputSize; 
gpuErrchk(cudaMalloc(&d_outputSize, sizeof(int))); getOutputSize << <1, 1 >> > (d_outputs, d_outputSize, config); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); int* outputSize = (int*)malloc(sizeof(int)); gpuErrchk(cudaMemcpy(outputSize, d_outputSize, sizeof(*outputSize), cudaMemcpyDeviceToHost)); // Data transfer int *d_cycles; gpuErrchk(cudaMalloc(&d_cycles, *outputSize * sizeof(*matrix))); transferOutputs << <1, 1 >> > (d_cycles, d_outputs, config); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); int* mergedCycles = (int*)malloc(*outputSize * sizeof(*matrix)); gpuErrchk(cudaMemcpy(mergedCycles, d_cycles, *outputSize * sizeof(*matrix), cudaMemcpyDeviceToHost)); // Conversion std::list<std::vector<int>> cycles; cycles = convertToList(mergedCycles, *outputSize); // Clean up gpuErrchk(cudaFree(d_matrix)); gpuErrchk(cudaFree(d_cycles)); freePathsContainers << <1, 1 >> > (d_outputs, config.matrixSize); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaFree(d_outputSize)); gpuErrchk(cudaFree(d_outputs)); free(mergedCycles); free(outputSize); return cycles; }
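convertToList() relies on a flat layout in which every cycle is written as head, ..., head and the cycles are simply concatenated. The standalone example below (not part of the project; the vertex numbers are made up) shows that layout being split back into cycles with the same scan, kept by value rather than via the new std::vector allocations used above.

#include <iostream>
#include <list>
#include <vector>

int main()
{
    // Two cycles: 0 -> 2 -> 3 -> 0 and 1 -> 4 -> 1, flattened back to back.
    int merged[] = { 0, 2, 3, 0, 1, 4, 1 };
    int count = sizeof(merged) / sizeof(merged[0]);

    std::list<std::vector<int>> cycles;
    std::vector<int> current;
    current.push_back(merged[0]);
    for (int i = 1; i < count; ++i) {
        current.push_back(merged[i]);
        if (merged[i] == current.front()) {                     // cycle closed
            cycles.push_back(current);
            current.clear();
            if (i + 1 < count) current.push_back(merged[++i]);  // next head
        }
    }
    for (const auto& c : cycles) {
        for (int v : c) std::cout << v << ' ';
        std::cout << '\n';
    }
    return 0;
}

Printed output is "0 2 3 0" and "1 4 1", i.e. each recovered cycle still starts and ends at its head vertex.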
181118613bb732793f773214556ec999d6e1b99e.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <stdio.h> #include <hip/hip_runtime.h> __global__ void nestedHelloWorld(int const iSize, int minSize, int iDepth) { int tid = blockIdx.x * blockDim.x + threadIdx.x; printf("Recursion=%d: Hello World from thread %d\n", iDepth, tid); // condition to stop recursive execution if (iSize == minSize) return; // reduce nthreads by half int nthreads = iSize >> 1; // thread 0 launches child grid recursively if(tid == 0 && nthreads > 0) { int blocks = (nthreads + blockDim.x - 1) / blockDim.x; // nestedHelloWorld<<<blocks, blockDim.x>>>(nthreads, minSize, ++iDepth); printf("-------> nested execution depth: %d\n", iDepth); } } int main(int argc, char **argv) { int igrid = 1; int blocksize = 8; if(argc > 1) { igrid = atoi(argv[1]); } if (argc > 2) { blocksize = atoi(argv[2]); } int size = igrid * blocksize; dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("size = %d\n", size); printf("igrid = %d\n", igrid); printf("%s Execution Configuration: grid %d block %d\n", argv[0], grid.x, block.x); hipLaunchKernelGGL(( nestedHelloWorld), dim3(grid), dim3(block), 0, 0, size, grid.x, 0); CHECK(hipGetLastError()); CHECK(hipDeviceSynchronize()); return 0; }
181118613bb732793f773214556ec999d6e1b99e.cu
#include "../common/common.h" #include <stdio.h> #include <cuda_runtime.h> __global__ void nestedHelloWorld(int const iSize, int minSize, int iDepth) { int tid = blockIdx.x * blockDim.x + threadIdx.x; printf("Recursion=%d: Hello World from thread %d\n", iDepth, tid); // condition to stop recursive execution if (iSize == minSize) return; // reduce nthreads by half int nthreads = iSize >> 1; // thread 0 launches child grid recursively if(tid == 0 && nthreads > 0) { int blocks = (nthreads + blockDim.x - 1) / blockDim.x; // nestedHelloWorld<<<blocks, blockDim.x>>>(nthreads, minSize, ++iDepth); printf("-------> nested execution depth: %d\n", iDepth); } } int main(int argc, char **argv) { int igrid = 1; int blocksize = 8; if(argc > 1) { igrid = atoi(argv[1]); } if (argc > 2) { blocksize = atoi(argv[2]); } int size = igrid * blocksize; dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("size = %d\n", size); printf("igrid = %d\n", igrid); printf("%s Execution Configuration: grid %d block %d\n", argv[0], grid.x, block.x); nestedHelloWorld<<<grid, block>>>(size, grid.x, 0); CHECK(cudaGetLastError()); CHECK(cudaDeviceSynchronize()); return 0; }
df871956c80854cd487b6aa07bc051027fff7df0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <chrono> #include "../configuration.h" #include "../dataset_loader.h" #include <ctime> #include "gol.h" #include <iostream> #include <ctime> #include <ratio> #include <chrono> #ifdef OPTION_RENDER // Rendering array. // TODO: Fix variable names. __device__ char *device_render_cells; char *host_render_cells; char *d_device_render_cells; #endif // OPTION_RENDER // Dataset. __device__ int SIZE_X; __device__ int SIZE_Y; __device__ Cell **cells; dataset_t dataset; __device__ int num_alive_neighbors(Agent *ptr) { int cell_x = ptr->cell_id() % SIZE_X; int cell_y = ptr->cell_id() / SIZE_X; int result = 0; for (int dx = -1; dx < 2; ++dx) { for (int dy = -1; dy < 2; ++dy) { int nx = cell_x + dx; int ny = cell_y + dy; if (nx > -1 && nx < SIZE_X && ny > -1 && ny < SIZE_Y) { Agent *tmp = cells[ny * SIZE_X + nx]->agent(); if (tmp) { if (tmp->isAlive()) { result++; } } } } } return result; } __device__ void maybe_create_candidate(Agent *ptr, int x, int y) { // Check neighborhood of cell to determine who should create Candidate. for (int dx = -1; dx < 2; ++dx) { for (int dy = -1; dy < 2; ++dy) { int nx = x + dx; int ny = y + dy; if (nx > -1 && nx < SIZE_X && ny > -1 && ny < SIZE_Y) { Agent *alive = cells[ny * SIZE_X + nx]->agent(); if (alive != nullptr) { if (alive->is_new()) { if (alive == ptr) { // Create candidate now. cells[y * SIZE_X + x]->set_agent((y * SIZE_X + x), AgentType::isCandidate); } // else: Created by other thread. return; } } } } } assert(false); } __device__ void create_candidates(Agent *ptr) { assert(ptr->is_new()); assert(ptr->isAlive()); // TODO: Consolidate with Agent::num_alive_neighbors(). int cell_x = ptr->cell_id() % SIZE_X; int cell_y = ptr->cell_id() / SIZE_X; for (int dx = -1; dx < 2; ++dx) { for (int dy = -1; dy < 2; ++dy) { int nx = cell_x + dx; int ny = cell_y + dy; if (nx > -1 && nx < SIZE_X && ny > -1 && ny < SIZE_Y) { if (cells[ny * SIZE_X + nx]->is_empty()) { // Candidate should be created here. maybe_create_candidate(ptr, nx, ny); } } } } } __device__ void Alive_prepare(Agent *ptr) { if (ptr) { if (ptr->isAlive()) { ptr->set_is_new(false); // Also counts this object itself. int alive_neighbors = num_alive_neighbors(ptr) - 1; if (alive_neighbors < 2 || alive_neighbors > 3) { ptr->set_action(kActionDie); } } } } __device__ void Alive_update(Agent *ptr) { if (ptr) { if (ptr->isAlive()) { int cid = ptr->cell_id(); // TODO: Consider splitting in two classes for less divergence. if (ptr->is_new()) { // Create candidates in neighborhood. create_candidates(ptr); } else { if (ptr->get_action() == kActionDie) { cells[cid]->set_agent(cid, AgentType::isCandidate); } } } } } __global__ void alive_prepare() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); Alive_prepare(ptr); } } __global__ void alive_update() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); Alive_update(ptr); } } __device__ void Candidate_prepare(Agent *ptr) { if (ptr) { if (ptr->isCandidate()) { int alive_neighbors = num_alive_neighbors(ptr); if (alive_neighbors == 3) { ptr->set_action(kActionSpawnAlive); } else if (alive_neighbors == 0) { ptr->set_action(kActionDie); } } } } __device__ void Candidate_update(Agent *ptr) { // TODO: Why is this necessary? 
if (ptr) { if (ptr->isCandidate()) { int cid = ptr->cell_id(); if (ptr->get_action() == kActionSpawnAlive) { cells[cid]->set_agent(cid, AgentType::isAlive); } else if (ptr->get_action() == kActionDie) { cells[cid]->delete_agent(); } } } } __global__ void candidate_prepare() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); Candidate_prepare(ptr); } } __global__ void candidate_update() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); Candidate_update(ptr); } } __global__ void create_cells() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { cells[i] = new Cell(); assert(cells[i] != nullptr); } } // Must be followed by Alive::update(). __global__ void load_game(int *cell_ids, int num_cells) { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_cells; i += blockDim.x * gridDim.x) { cells[cell_ids[i]]->set_agent(cell_ids[i], AgentType::isAlive); assert(cells[cell_ids[i]]->agent()->cell_id() == cell_ids[i]); } } int checksum(); void transfer_dataset() { int *dev_cell_ids; hipMalloc(&dev_cell_ids, sizeof(int) * dataset.num_alive); hipMemcpy(dev_cell_ids, dataset.alive_cells, sizeof(int) * dataset.num_alive, hipMemcpyHostToDevice); #ifndef NDEBUG printf("Loading on GPU: %i alive cells.\n", dataset.num_alive); #endif // NDEBUG hipLaunchKernelGGL(( load_game), dim3(128), dim3(128), 0, 0, dev_cell_ids, dataset.num_alive); gpuErrchk(hipDeviceSynchronize()); hipFree(dev_cell_ids); hipLaunchKernelGGL(( alive_update), dim3(1024), dim3(1024), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } __device__ int device_checksum; __device__ int device_num_candidates; ALL void Agent::update_checksum() { if (this->isAlive()) atomicAdd(&device_checksum, 1); else if(this-> isCandidate()) atomicAdd(&device_num_candidates, 1); } __global__ void update_checksum() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); if (ptr) ptr->update_checksum(); } } int checksum() { int host_checksum = 0; int host_num_candidates = 0; hipMemcpyToSymbol(device_checksum, &host_checksum, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(device_num_candidates, &host_num_candidates, sizeof(int), 0, hipMemcpyHostToDevice); // allocator_handle->parallel_do<Alive, &Alive::update_checksum>(); // allocator_handle->parallel_do<Candidate, &Candidate::update_counter>(); hipLaunchKernelGGL(( update_checksum), dim3(1024), dim3(1024), 0, 0, ); hipMemcpyFromSymbol(&host_checksum, device_checksum, sizeof(int), 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol(&host_num_candidates, device_num_candidates, sizeof(int), 0, hipMemcpyDeviceToHost); return host_checksum ; } int main(int argc, char **argv) { if (argc != 2) { printf("Usage: %s filename.pgm\n", argv[0]); exit(1); } else { // Load data set. dataset = load_from_file(argv[1]); } hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); hipMemcpyToSymbol(SIZE_X, &dataset.x, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(SIZE_Y, &dataset.y, sizeof(int), 0, hipMemcpyHostToDevice); // Allocate memory. 
Cell **host_cells; using namespace std::chrono; high_resolution_clock::time_point t1 = high_resolution_clock::now(); hipMalloc(&host_cells, sizeof(Cell *) * dataset.x * dataset.y); hipMemcpyToSymbol(cells, &host_cells, sizeof(Cell **), 0, hipMemcpyHostToDevice); // Initialize cells. hipLaunchKernelGGL(( create_cells), dim3(128), dim3(128), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); high_resolution_clock::time_point t2 = high_resolution_clock::now(); duration<double> alloc_time = duration_cast<duration<double>>(t2 - t1); printf("alloc_time : %f \n",alloc_time.count() ); transfer_dataset(); auto time_start = std::chrono::system_clock::now(); printf("Checksum: %i\n", checksum()); // Run simulation. for (int i = 0; i < kNumIterations; ++i) { hipLaunchKernelGGL(( candidate_prepare), dim3(1024), dim3(1024), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( alive_prepare), dim3(1024), dim3(1024), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( candidate_update), dim3(1024), dim3(1024), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( alive_update), dim3(1024), dim3(1024), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); // printf("Checksum: %i\n", checksum()); } auto time_end = std::chrono::system_clock::now(); auto elapsed = time_end - time_start; auto micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(); printf("Checksum: %i\n", checksum()); printf("%lu, \n", micros); hipFree(host_cells); return 0; }
df871956c80854cd487b6aa07bc051027fff7df0.cu
#include <chrono> #include "../configuration.h" #include "../dataset_loader.h" #include <ctime> #include "gol.h" #include <iostream> #include <ctime> #include <ratio> #include <chrono> #ifdef OPTION_RENDER // Rendering array. // TODO: Fix variable names. __device__ char *device_render_cells; char *host_render_cells; char *d_device_render_cells; #endif // OPTION_RENDER // Dataset. __device__ int SIZE_X; __device__ int SIZE_Y; __device__ Cell **cells; dataset_t dataset; __device__ int num_alive_neighbors(Agent *ptr) { int cell_x = ptr->cell_id() % SIZE_X; int cell_y = ptr->cell_id() / SIZE_X; int result = 0; for (int dx = -1; dx < 2; ++dx) { for (int dy = -1; dy < 2; ++dy) { int nx = cell_x + dx; int ny = cell_y + dy; if (nx > -1 && nx < SIZE_X && ny > -1 && ny < SIZE_Y) { Agent *tmp = cells[ny * SIZE_X + nx]->agent(); if (tmp) { if (tmp->isAlive()) { result++; } } } } } return result; } __device__ void maybe_create_candidate(Agent *ptr, int x, int y) { // Check neighborhood of cell to determine who should create Candidate. for (int dx = -1; dx < 2; ++dx) { for (int dy = -1; dy < 2; ++dy) { int nx = x + dx; int ny = y + dy; if (nx > -1 && nx < SIZE_X && ny > -1 && ny < SIZE_Y) { Agent *alive = cells[ny * SIZE_X + nx]->agent(); if (alive != nullptr) { if (alive->is_new()) { if (alive == ptr) { // Create candidate now. cells[y * SIZE_X + x]->set_agent((y * SIZE_X + x), AgentType::isCandidate); } // else: Created by other thread. return; } } } } } assert(false); } __device__ void create_candidates(Agent *ptr) { assert(ptr->is_new()); assert(ptr->isAlive()); // TODO: Consolidate with Agent::num_alive_neighbors(). int cell_x = ptr->cell_id() % SIZE_X; int cell_y = ptr->cell_id() / SIZE_X; for (int dx = -1; dx < 2; ++dx) { for (int dy = -1; dy < 2; ++dy) { int nx = cell_x + dx; int ny = cell_y + dy; if (nx > -1 && nx < SIZE_X && ny > -1 && ny < SIZE_Y) { if (cells[ny * SIZE_X + nx]->is_empty()) { // Candidate should be created here. maybe_create_candidate(ptr, nx, ny); } } } } } __device__ void Alive_prepare(Agent *ptr) { if (ptr) { if (ptr->isAlive()) { ptr->set_is_new(false); // Also counts this object itself. int alive_neighbors = num_alive_neighbors(ptr) - 1; if (alive_neighbors < 2 || alive_neighbors > 3) { ptr->set_action(kActionDie); } } } } __device__ void Alive_update(Agent *ptr) { if (ptr) { if (ptr->isAlive()) { int cid = ptr->cell_id(); // TODO: Consider splitting in two classes for less divergence. if (ptr->is_new()) { // Create candidates in neighborhood. create_candidates(ptr); } else { if (ptr->get_action() == kActionDie) { cells[cid]->set_agent(cid, AgentType::isCandidate); } } } } } __global__ void alive_prepare() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); Alive_prepare(ptr); } } __global__ void alive_update() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); Alive_update(ptr); } } __device__ void Candidate_prepare(Agent *ptr) { if (ptr) { if (ptr->isCandidate()) { int alive_neighbors = num_alive_neighbors(ptr); if (alive_neighbors == 3) { ptr->set_action(kActionSpawnAlive); } else if (alive_neighbors == 0) { ptr->set_action(kActionDie); } } } } __device__ void Candidate_update(Agent *ptr) { // TODO: Why is this necessary? 
if (ptr) { if (ptr->isCandidate()) { int cid = ptr->cell_id(); if (ptr->get_action() == kActionSpawnAlive) { cells[cid]->set_agent(cid, AgentType::isAlive); } else if (ptr->get_action() == kActionDie) { cells[cid]->delete_agent(); } } } } __global__ void candidate_prepare() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); Candidate_prepare(ptr); } } __global__ void candidate_update() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); Candidate_update(ptr); } } __global__ void create_cells() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { cells[i] = new Cell(); assert(cells[i] != nullptr); } } // Must be followed by Alive::update(). __global__ void load_game(int *cell_ids, int num_cells) { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_cells; i += blockDim.x * gridDim.x) { cells[cell_ids[i]]->set_agent(cell_ids[i], AgentType::isAlive); assert(cells[cell_ids[i]]->agent()->cell_id() == cell_ids[i]); } } int checksum(); void transfer_dataset() { int *dev_cell_ids; cudaMalloc(&dev_cell_ids, sizeof(int) * dataset.num_alive); cudaMemcpy(dev_cell_ids, dataset.alive_cells, sizeof(int) * dataset.num_alive, cudaMemcpyHostToDevice); #ifndef NDEBUG printf("Loading on GPU: %i alive cells.\n", dataset.num_alive); #endif // NDEBUG load_game<<<128, 128>>>(dev_cell_ids, dataset.num_alive); gpuErrchk(cudaDeviceSynchronize()); cudaFree(dev_cell_ids); alive_update<<<1024, 1024>>>(); gpuErrchk(cudaDeviceSynchronize()); } __device__ int device_checksum; __device__ int device_num_candidates; ALL void Agent::update_checksum() { if (this->isAlive()) atomicAdd(&device_checksum, 1); else if(this-> isCandidate()) atomicAdd(&device_num_candidates, 1); } __global__ void update_checksum() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < SIZE_X * SIZE_Y; i += blockDim.x * gridDim.x) { Agent *ptr = cells[i]->agent(); if (ptr) ptr->update_checksum(); } } int checksum() { int host_checksum = 0; int host_num_candidates = 0; cudaMemcpyToSymbol(device_checksum, &host_checksum, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(device_num_candidates, &host_num_candidates, sizeof(int), 0, cudaMemcpyHostToDevice); // allocator_handle->parallel_do<Alive, &Alive::update_checksum>(); // allocator_handle->parallel_do<Candidate, &Candidate::update_counter>(); update_checksum<<<1024, 1024>>>(); cudaMemcpyFromSymbol(&host_checksum, device_checksum, sizeof(int), 0, cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(&host_num_candidates, device_num_candidates, sizeof(int), 0, cudaMemcpyDeviceToHost); return host_checksum ; } int main(int argc, char **argv) { if (argc != 2) { printf("Usage: %s filename.pgm\n", argv[0]); exit(1); } else { // Load data set. dataset = load_from_file(argv[1]); } cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); cudaMemcpyToSymbol(SIZE_X, &dataset.x, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(SIZE_Y, &dataset.y, sizeof(int), 0, cudaMemcpyHostToDevice); // Allocate memory. Cell **host_cells; using namespace std::chrono; high_resolution_clock::time_point t1 = high_resolution_clock::now(); cudaMalloc(&host_cells, sizeof(Cell *) * dataset.x * dataset.y); cudaMemcpyToSymbol(cells, &host_cells, sizeof(Cell **), 0, cudaMemcpyHostToDevice); // Initialize cells. 
create_cells<<<128, 128>>>(); gpuErrchk(cudaDeviceSynchronize()); high_resolution_clock::time_point t2 = high_resolution_clock::now(); duration<double> alloc_time = duration_cast<duration<double>>(t2 - t1); printf("alloc_time : %f \n",alloc_time.count() ); transfer_dataset(); auto time_start = std::chrono::system_clock::now(); printf("Checksum: %i\n", checksum()); // Run simulation. for (int i = 0; i < kNumIterations; ++i) { candidate_prepare<<<1024, 1024>>>(); gpuErrchk(cudaDeviceSynchronize()); alive_prepare<<<1024, 1024>>>(); gpuErrchk(cudaDeviceSynchronize()); candidate_update<<<1024, 1024>>>(); gpuErrchk(cudaDeviceSynchronize()); alive_update<<<1024, 1024>>>(); gpuErrchk(cudaDeviceSynchronize()); // printf("Checksum: %i\n", checksum()); } auto time_end = std::chrono::system_clock::now(); auto elapsed = time_end - time_start; auto micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(); printf("Checksum: %i\n", checksum()); printf("%lu, \n", micros); cudaFree(host_cells); return 0; }
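The prepare kernels above encode the standard Game of Life rule split across two agent kinds: an Alive agent survives only with 2 or 3 live neighbours (its own cell excluded from the count), while a Candidate spawns with exactly 3 live neighbours and is discarded once it has none left. The host-side restatement below is not project code; the Action names are illustrative stand-ins for kActionDie and kActionSpawnAlive.

// Illustrative restatement of the decisions made in Alive_prepare and
// Candidate_prepare, with the neighbour counts passed in directly.
enum class Action { None, Die, SpawnAlive };

static Action alive_rule(int alive_neighbors_excluding_self)
{
    if (alive_neighbors_excluding_self < 2 || alive_neighbors_excluding_self > 3)
        return Action::Die;                 // under- or over-population
    return Action::None;                    // survives
}

static Action candidate_rule(int alive_neighbors)
{
    if (alive_neighbors == 3) return Action::SpawnAlive;   // birth
    if (alive_neighbors == 0) return Action::Die;          // no live neighbour left
    return Action::None;
}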
c3a684b86dcecef70dd491a87109814a11db9f74.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PADDLE_WITH_HIP // HIP not support cusolver #include <algorithm> #include <vector> #include "paddle/fluid/memory/memory.h" #include "paddle/phi/backends/dynload/cusolver.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/abs_kernel.h" #include "paddle/phi/kernels/elementwise_multiply_kernel.h" #include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/compare_functors.h" #include "paddle/phi/kernels/impl/matrix_rank_kernel_impl.h" #include "paddle/phi/kernels/matrix_rank_tol_kernel.h" #include "paddle/phi/kernels/reduce_max_kernel.h" #include "paddle/phi/kernels/reduce_sum_kernel.h" namespace phi { template <typename T> void GesvdjBatched(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, T* A, T* U, T* V, T* S, int* info, int thin_UV = 1); template <typename T> void SyevjBatched(const phi::GPUContext& dev_ctx, int batchSize, int n, T* A, T* W, int* info); template <> void GesvdjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, float* A, float* U, float* V, float* S, int* info, int thin_UV) { // do not compute singular vectors const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; hipsolverGesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnSgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void GesvdjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, double* A, double* U, double* V, double* S, int* info, int thin_UV) { // do not compute singular vectors const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; hipsolverGesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; ++i) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); // check the error info int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void SyevjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int n, float* A, float* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; // matrix is saved as column-major in cusolver. // numpy and torch use lower triangle to compute eigenvalues, so here use // upper triangle hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; hipsolverSyevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDestroySyevjInfo(params)); } template <> void SyevjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int n, double* A, double* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; // upper triangle of A is stored hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; hipsolverSyevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDestroySyevjInfo(params)); } template <typename T, typename Context> void MatrixRankTolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& atol_tensor, bool use_default_tol, bool hermitian, DenseTensor* out) { auto* x_data = x.data<T>(); dev_ctx.template Alloc<int64_t>(out); auto dim_x = x.dims(); auto dim_out = out->dims(); int rows = dim_x[dim_x.size() - 2]; int cols = dim_x[dim_x.size() - 1]; int k = ::min(rows, cols); auto numel = x.numel(); int batches = numel / (rows * cols); T rtol_T = 0; if (use_default_tol) { rtol_T = std::numeric_limits<T>::epsilon() * ::max(rows, cols); } // Must Copy X once, because the gesvdj will destory the content when exit. 
DenseTensor x_tmp; paddle::framework::TensorCopy(x, dev_ctx.GetPlace(), &x_tmp); auto info = paddle::memory::Alloc(dev_ctx, sizeof(int) * batches); int* info_ptr = reinterpret_cast<int*>(info->ptr()); DenseTensor eigenvalue_tensor; eigenvalue_tensor.Resize(detail::GetEigenvalueDim(dim_x, k)); auto* eigenvalue_data = dev_ctx.template Alloc<T>(&eigenvalue_tensor); if (hermitian) { SyevjBatched<T>( dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data, info_ptr); phi::AbsKernel<T, Context>(dev_ctx, eigenvalue_tensor, &eigenvalue_tensor); } else { DenseTensor U, VH; U.Resize(detail::GetUDDim(dim_x, k)); VH.Resize(detail::GetVHDDim(dim_x, k)); auto* u_data = dev_ctx.template Alloc<T>(&U); auto* vh_data = dev_ctx.template Alloc<T>(&VH); GesvdjBatched<T>(dev_ctx, batches, cols, rows, k, x_tmp.data<T>(), vh_data, u_data, eigenvalue_data, info_ptr, 1); } DenseTensor max_eigenvalue_tensor; dev_ctx.template Alloc<T>(&max_eigenvalue_tensor); max_eigenvalue_tensor.Resize(detail::RemoveLastDim(eigenvalue_tensor.dims())); phi::MaxKernel<T, Context>(dev_ctx, eigenvalue_tensor, std::vector<int64_t>{-1}, false, &max_eigenvalue_tensor); DenseTensor temp_rtol_tensor; temp_rtol_tensor = phi::Full<T, Context>(dev_ctx, {1}, static_cast<T>(rtol_T)); DenseTensor rtol_tensor = phi::Multiply<T>(dev_ctx, temp_rtol_tensor, max_eigenvalue_tensor); DenseTensor tol_tensor; tol_tensor.Resize(dim_out); dev_ctx.template Alloc<T>(&tol_tensor); funcs::ElementwiseCompute<GreaterElementFunctor<T>, T, T>( dev_ctx, atol_tensor, rtol_tensor, -1, GreaterElementFunctor<T>(), &tol_tensor); tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1)); DenseTensor compare_result; compare_result.Resize(detail::NewAxisDim(dim_out, k)); dev_ctx.template Alloc<int64_t>(&compare_result); int axis = -1; funcs::ElementwiseCompute<funcs::GreaterThanFunctor<T, int64_t>, T, int64_t>( dev_ctx, eigenvalue_tensor, tol_tensor, axis, funcs::GreaterThanFunctor<T, int64_t>(), &compare_result); phi::SumKernel<int64_t>(dev_ctx, compare_result, std::vector<int64_t>{-1}, compare_result.dtype(), false, out); } } // namespace phi PD_REGISTER_KERNEL(matrix_rank_tol, // cuda_only GPU, ALL_LAYOUT, phi::MatrixRankTolKernel, float, double) {} #endif // not PADDLE_WITH_HIP
c3a684b86dcecef70dd491a87109814a11db9f74.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PADDLE_WITH_HIP // HIP not support cusolver #include <algorithm> #include <vector> #include "paddle/fluid/memory/memory.h" #include "paddle/phi/backends/dynload/cusolver.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/abs_kernel.h" #include "paddle/phi/kernels/elementwise_multiply_kernel.h" #include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/compare_functors.h" #include "paddle/phi/kernels/impl/matrix_rank_kernel_impl.h" #include "paddle/phi/kernels/matrix_rank_tol_kernel.h" #include "paddle/phi/kernels/reduce_max_kernel.h" #include "paddle/phi/kernels/reduce_sum_kernel.h" namespace phi { template <typename T> void GesvdjBatched(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, T* A, T* U, T* V, T* S, int* info, int thin_UV = 1); template <typename T> void SyevjBatched(const phi::GPUContext& dev_ctx, int batchSize, int n, T* A, T* W, int* info); template <> void GesvdjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, float* A, float* U, float* V, float* S, int* info, int thin_UV) { // do not compute singular vectors const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; gesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnSgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void GesvdjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, double* A, double* U, double* V, double* S, int* info, int thin_UV) { // do not compute singular vectors const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; gesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; ++i) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); // check the error info int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void SyevjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int n, float* A, float* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; // matrix is saved as column-major in cusolver. // numpy and torch use lower triangle to compute eigenvalues, so here use // upper triangle cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; syevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDestroySyevjInfo(params)); } template <> void SyevjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int n, double* A, double* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; // upper triangle of A is stored cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; syevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDestroySyevjInfo(params)); } template <typename T, typename Context> void MatrixRankTolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& atol_tensor, bool use_default_tol, bool hermitian, DenseTensor* out) { auto* x_data = x.data<T>(); dev_ctx.template Alloc<int64_t>(out); auto dim_x = x.dims(); auto dim_out = out->dims(); int rows = dim_x[dim_x.size() - 2]; int cols = dim_x[dim_x.size() - 1]; int k = std::min(rows, cols); auto numel = x.numel(); int batches = numel / (rows * cols); T rtol_T = 0; if (use_default_tol) { rtol_T = std::numeric_limits<T>::epsilon() * std::max(rows, cols); } // Must Copy X once, because the gesvdj will destory the content when exit. 
DenseTensor x_tmp; paddle::framework::TensorCopy(x, dev_ctx.GetPlace(), &x_tmp); auto info = paddle::memory::Alloc(dev_ctx, sizeof(int) * batches); int* info_ptr = reinterpret_cast<int*>(info->ptr()); DenseTensor eigenvalue_tensor; eigenvalue_tensor.Resize(detail::GetEigenvalueDim(dim_x, k)); auto* eigenvalue_data = dev_ctx.template Alloc<T>(&eigenvalue_tensor); if (hermitian) { SyevjBatched<T>( dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data, info_ptr); phi::AbsKernel<T, Context>(dev_ctx, eigenvalue_tensor, &eigenvalue_tensor); } else { DenseTensor U, VH; U.Resize(detail::GetUDDim(dim_x, k)); VH.Resize(detail::GetVHDDim(dim_x, k)); auto* u_data = dev_ctx.template Alloc<T>(&U); auto* vh_data = dev_ctx.template Alloc<T>(&VH); GesvdjBatched<T>(dev_ctx, batches, cols, rows, k, x_tmp.data<T>(), vh_data, u_data, eigenvalue_data, info_ptr, 1); } DenseTensor max_eigenvalue_tensor; dev_ctx.template Alloc<T>(&max_eigenvalue_tensor); max_eigenvalue_tensor.Resize(detail::RemoveLastDim(eigenvalue_tensor.dims())); phi::MaxKernel<T, Context>(dev_ctx, eigenvalue_tensor, std::vector<int64_t>{-1}, false, &max_eigenvalue_tensor); DenseTensor temp_rtol_tensor; temp_rtol_tensor = phi::Full<T, Context>(dev_ctx, {1}, static_cast<T>(rtol_T)); DenseTensor rtol_tensor = phi::Multiply<T>(dev_ctx, temp_rtol_tensor, max_eigenvalue_tensor); DenseTensor tol_tensor; tol_tensor.Resize(dim_out); dev_ctx.template Alloc<T>(&tol_tensor); funcs::ElementwiseCompute<GreaterElementFunctor<T>, T, T>( dev_ctx, atol_tensor, rtol_tensor, -1, GreaterElementFunctor<T>(), &tol_tensor); tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1)); DenseTensor compare_result; compare_result.Resize(detail::NewAxisDim(dim_out, k)); dev_ctx.template Alloc<int64_t>(&compare_result); int axis = -1; funcs::ElementwiseCompute<funcs::GreaterThanFunctor<T, int64_t>, T, int64_t>( dev_ctx, eigenvalue_tensor, tol_tensor, axis, funcs::GreaterThanFunctor<T, int64_t>(), &compare_result); phi::SumKernel<int64_t>(dev_ctx, compare_result, std::vector<int64_t>{-1}, compare_result.dtype(), false, out); } } // namespace phi PD_REGISTER_KERNEL(matrix_rank_tol, // cuda_only GPU, ALL_LAYOUT, phi::MatrixRankTolKernel, float, double) {} #endif // not PADDLE_WITH_HIP
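The kernel sequence above reduces each matrix in the batch to a rank by counting how many singular values (or absolute eigenvalues, in the Hermitian path) exceed max(atol, rtol * sigma_max). A minimal host-side sketch of that final reduction, with an illustrative helper name that is not a Paddle API:

#include <algorithm>
#include <vector>

// Hypothetical host helper: count the singular values of one matrix in the batch
// that exceed the combined tolerance max(atol, rtol * sigma_max). This mirrors the
// GreaterElementFunctor / GreaterThanFunctor / SumKernel chain used above.
static long long matrix_rank_with_tol(const std::vector<double>& sigma,
                                      double atol, double rtol) {
  double sigma_max = 0.0;
  for (double s : sigma) sigma_max = std::max(sigma_max, s);
  const double tol = std::max(atol, rtol * sigma_max);
  long long rank = 0;
  for (double s : sigma) rank += (s > tol) ? 1 : 0;
  return rank;
}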
9f29628e3bc8b67fb952464fb4fa0fe2b8ebdead.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <hip/hip_complex.h> static const int WORK_SIZE = 10; /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } int main(void) { int *d = NULL; int i; float2 idata[WORK_SIZE]; float2 odata[WORK_SIZE]; for (i = 0; i < WORK_SIZE; i++){ idata[i].x = i; idata[i].y = 0; } hipfftReal a; hipfftHandle plan; hipfftComplex *data; hipMalloc((void**)&data, sizeof(float2)*WORK_SIZE); hipMemcpy(data,idata,sizeof(float2)*WORK_SIZE,hipMemcpyHostToDevice); //hipfftPlan1d(&plan, WORK_SIZE, HIPFFT_C2C,1); //hipfftPlanMany(hipfftHandle *plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, hipfftType type, int batch); int rank = 1; int n[1]; int inembed[1]; int istride=1; int idist=5; int onembed[1]; int ostride = 1; int odist = 5; int batch =2; n[0]=WORK_SIZE/2; inembed[0]=WORK_SIZE; onembed[0]=WORK_SIZE; hipfftPlanMany(&plan, rank, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, batch); hipfftExecC2C(plan, data, data, HIPFFT_FORWARD); hipDeviceSynchronize(); CUDA_CHECK_RETURN(hipMemcpy(odata, data, sizeof(float2)*WORK_SIZE, hipMemcpyDeviceToHost)); for (i = 0; i < WORK_SIZE; i++) printf("%f\n",cuCabsf(odata[i])); CUDA_CHECK_RETURN(hipFree((int*) d)); CUDA_CHECK_RETURN(hipDeviceReset()); hipFree(data); hipfftDestroy(plan); return 0; }
9f29628e3bc8b67fb952464fb4fa0fe2b8ebdead.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cufft.h> #include <cuComplex.h> static const int WORK_SIZE = 10; /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } int main(void) { int *d = NULL; int i; float2 idata[WORK_SIZE]; float2 odata[WORK_SIZE]; for (i = 0; i < WORK_SIZE; i++){ idata[i].x = i; idata[i].y = 0; } cufftReal a; cufftHandle plan; cufftComplex *data; cudaMalloc((void**)&data, sizeof(float2)*WORK_SIZE); cudaMemcpy(data,idata,sizeof(float2)*WORK_SIZE,cudaMemcpyHostToDevice); //cufftPlan1d(&plan, WORK_SIZE, CUFFT_C2C,1); //cufftPlanMany(cufftHandle *plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, cufftType type, int batch); int rank = 1; int n[1]; int inembed[1]; int istride=1; int idist=5; int onembed[1]; int ostride = 1; int odist = 5; int batch =2; n[0]=WORK_SIZE/2; inembed[0]=WORK_SIZE; onembed[0]=WORK_SIZE; cufftPlanMany(&plan, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2C, batch); cufftExecC2C(plan, data, data, CUFFT_FORWARD); cudaDeviceSynchronize(); CUDA_CHECK_RETURN(cudaMemcpy(odata, data, sizeof(float2)*WORK_SIZE, cudaMemcpyDeviceToHost)); for (i = 0; i < WORK_SIZE; i++) printf("%f\n",cuCabsf(odata[i])); CUDA_CHECK_RETURN(cudaFree((int*) d)); CUDA_CHECK_RETURN(cudaDeviceReset()); cudaFree(data); cufftDestroy(plan); return 0; }
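With istride = ostride = 1 and idist = odist = n[0], the cufftPlanMany call above describes two contiguous 5-point transforms laid end to end, so for this particular layout it behaves like the basic batched 1D plan. A sketch of the simpler form (the wrapper function name is illustrative):

#include <cufft.h>

// For a contiguous layout (unit stride, distance equal to the transform length),
// cufftPlanMany reduces to a batched 1D plan: 'n' points per transform, 'batch'
// transforms, executed in place on 'data'.
void run_batched_fft(cufftComplex* data, int n, int batch) {
  cufftHandle plan;
  cufftPlan1d(&plan, n, CUFFT_C2C, batch);
  cufftExecC2C(plan, data, data, CUFFT_FORWARD);
  cufftDestroy(plan);
}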
867bd06a50371124995f629957224f0da35c0829.hip
// !!! This is a file automatically generated by hipify!!! // *************************************************************************** // Assignment #2 // Name: Yujin Yoshimura // Parallel Programming Date: March 5, 2020 // *************************************************************************** // This sequential program demonstrates Matrix Multiplication. // // For Turing, use the script on the same directory to compile and run. // TACC Maverick 2 command to compile: // gcc YujinYoshimura1B.cu -o YujinYoshimura1B_Exe // *************************************************************************** #include <hip/hip_runtime.h> #include <stdio.h> const int ROW = 32; const int COL = 32; // *************************************************************************** // Function Name: cross_product // Parameters: int*, int*, int* // Return: void // Description: Returns the cross product of two matrices. // *************************************************************************** __global__ void cross_product(int* a, int* b, int* c) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k; c[i * COL + j] = 0; for (k = 0; k < ROW; k++) { c[i * COL + j] += a[i * COL + k] * b[k * COL + j]; } } // *************************************************************************** // Function Name: main // Parameters: int, char** // Return: int // Description: Main function of the program. // *************************************************************************** int main(int argc, char **argv) { int matrix_a[ROW * COL]; int matrix_b[ROW * COL]; int matrix_c[ROW * COL]; int i, j, sum = 0; int* ad; int* bd; int* cd; const int isize = ROW * COL * sizeof(int); float elapsed; hipEvent_t start, stop; // Create CUDA Events hipEventCreate(&start); hipEventCreate(&stop); // Initialize matrix A, B and C for (i = 0; i < ROW; i++) { for (j = 0; j < COL; j++) { matrix_a[i * COL + j] = i; matrix_b[i * COL + j] = COL - i - 1; matrix_c[i * COL + j] = 0; } } // Allocate memory and copy matrices to global memory hipMalloc( (void**)&ad, isize ); hipMalloc( (void**)&bd, isize ); hipMalloc( (void**)&cd, isize ); hipMemcpy( ad, matrix_a, isize, hipMemcpyHostToDevice ); hipMemcpy( bd, matrix_b, isize, hipMemcpyHostToDevice ); hipMemcpy( cd, matrix_c, isize, hipMemcpyHostToDevice ); dim3 dimGrid( 1 , 1 ); dim3 dimBlock( COL , ROW ); hipEventRecord(start); hipLaunchKernelGGL(( cross_product), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd, cd); hipEventRecord(stop); // Copy matrix to memory, time and free global memory hipMemcpy( matrix_c, cd, isize, hipMemcpyDeviceToHost ); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); hipFree( ad ); hipFree( bd ); hipFree( cd ); for (i = 0; i < ROW; i++) { for (j = 0; j < COL; j++) { sum += matrix_c[i * COL + j]; } } printf("The summation of all the elements is = %d\n", sum); printf("Elapsed time = %f milliseconds.\n", elapsed); return EXIT_SUCCESS; }
867bd06a50371124995f629957224f0da35c0829.cu
// *************************************************************************** // Assignment #2 // Name: Yujin Yoshimura // Parallel Programming Date: March 5, 2020 // *************************************************************************** // This sequential program demonstrates Matrix Multiplication. // // For Turing, use the script on the same directory to compile and run. // TACC Maverick 2 command to compile: // gcc YujinYoshimura1B.cu -o YujinYoshimura1B_Exe // *************************************************************************** #include <cuda.h> #include <stdio.h> const int ROW = 32; const int COL = 32; // *************************************************************************** // Function Name: cross_product // Parameters: int*, int*, int* // Return: void // Description: Returns the cross product of two matrices. // *************************************************************************** __global__ void cross_product(int* a, int* b, int* c) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k; c[i * COL + j] = 0; for (k = 0; k < ROW; k++) { c[i * COL + j] += a[i * COL + k] * b[k * COL + j]; } } // *************************************************************************** // Function Name: main // Parameters: int, char** // Return: int // Description: Main function of the program. // *************************************************************************** int main(int argc, char **argv) { int matrix_a[ROW * COL]; int matrix_b[ROW * COL]; int matrix_c[ROW * COL]; int i, j, sum = 0; int* ad; int* bd; int* cd; const int isize = ROW * COL * sizeof(int); float elapsed; cudaEvent_t start, stop; // Create CUDA Events cudaEventCreate(&start); cudaEventCreate(&stop); // Initialize matrix A, B and C for (i = 0; i < ROW; i++) { for (j = 0; j < COL; j++) { matrix_a[i * COL + j] = i; matrix_b[i * COL + j] = COL - i - 1; matrix_c[i * COL + j] = 0; } } // Allocate memory and copy matrices to global memory cudaMalloc( (void**)&ad, isize ); cudaMalloc( (void**)&bd, isize ); cudaMalloc( (void**)&cd, isize ); cudaMemcpy( ad, matrix_a, isize, cudaMemcpyHostToDevice ); cudaMemcpy( bd, matrix_b, isize, cudaMemcpyHostToDevice ); cudaMemcpy( cd, matrix_c, isize, cudaMemcpyHostToDevice ); dim3 dimGrid( 1 , 1 ); dim3 dimBlock( COL , ROW ); cudaEventRecord(start); cross_product<<<dimGrid, dimBlock>>>(ad, bd, cd); cudaEventRecord(stop); // Copy matrix to memory, time and free global memory cudaMemcpy( matrix_c, cd, isize, cudaMemcpyDeviceToHost ); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); cudaFree( ad ); cudaFree( bd ); cudaFree( cd ); for (i = 0; i < ROW; i++) { for (j = 0; j < COL; j++) { sum += matrix_c[i * COL + j]; } } printf("The summation of all the elements is = %d\n", sum); printf("Elapsed time = %f milliseconds.\n", elapsed); return EXIT_SUCCESS; }
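The launch above uses a single 32x32 block, so ROW and COL are capped by the 1024-threads-per-block limit. A sketch of how the same one-thread-per-element kernel could be tiled across multiple blocks for larger square matrices (an illustrative variant, not part of the original assignment):

// One thread per element of C, with bounds checks so the grid may overshoot n.
__global__ void cross_product_tiled(const int* a, const int* b, int* c, int n) {
  int i = blockIdx.y * blockDim.y + threadIdx.y;  // row of C
  int j = blockIdx.x * blockDim.x + threadIdx.x;  // column of C
  if (i >= n || j >= n) return;
  int sum = 0;
  for (int k = 0; k < n; k++) {
    sum += a[i * n + k] * b[k * n + j];
  }
  c[i * n + j] = sum;
}
// Example launch: dim3 block(16, 16); dim3 grid((n + 15) / 16, (n + 15) / 16);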
61e82d5921e67d2bb88e3462550e8a336448f364.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> template <typename scalar_t> __global__ void FastWalshHadamardKernel(const int stride, const scalar_t* in, scalar_t* out) { const auto idx = (threadIdx.x + blockIdx.x * blockDim.x); const auto elemIdx = (idx / stride ) * (2 * stride) + (idx % stride); const auto tmp = in[elemIdx], tmp2 = in[elemIdx + stride]; out[elemIdx] = tmp + tmp2; out[elemIdx + stride] = tmp - tmp2; } template <typename scalar_t> __global__ void FastWalshHadamardSubKernel(const scalar_t scalar, scalar_t* out) { const auto idx = (threadIdx.x + blockIdx.x * blockDim.x); out[idx] *= scalar; } void fast_walsh_hadamard_transform_cuda_kernel(const int NN, const int halfLL, torch::Tensor in, torch::Tensor out, bool normalize) { // Apply Unnormalized Fast Walsh Hadamard transform int stride = halfLL; float normalizer = 1.0; float sqrt2inv = 0.70710678118654746; while (stride >= 1) { if(stride == halfLL) { AT_DISPATCH_FLOATING_TYPES(in.scalar_type(),"fast_walsh_hadamard_transform_in", ([&] { hipLaunchKernelGGL(( FastWalshHadamardKernel<scalar_t>), dim3(max(1, halfLL/256)), dim3(min(256, halfLL)), 0, 0, stride, in.data_ptr<scalar_t>(), out.data_ptr<scalar_t>()); })); } else { AT_DISPATCH_FLOATING_TYPES(in.scalar_type(),"fast_walsh_hadamard_transform_out", ([&] { hipLaunchKernelGGL(( FastWalshHadamardKernel<scalar_t>), dim3(max(1, halfLL/256)), dim3(min(256, halfLL)), 0, 0, stride, out.data_ptr<scalar_t>(), out.data_ptr<scalar_t>()); })); } stride /= 2; normalizer *= sqrt2inv; } if(normalize){ AT_DISPATCH_FLOATING_TYPES(in.scalar_type(),"fast_walsh_hadamard_transform_final", ([&] { hipLaunchKernelGGL(( FastWalshHadamardSubKernel<scalar_t>), dim3(max(1, NN/256)), dim3(min(256, NN)), 0, 0, normalizer, out.data_ptr<scalar_t>()); })); } }
61e82d5921e67d2bb88e3462550e8a336448f364.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> template <typename scalar_t> __global__ void FastWalshHadamardKernel(const int stride, const scalar_t* in, scalar_t* out) { const auto idx = (threadIdx.x + blockIdx.x * blockDim.x); const auto elemIdx = (idx / stride ) * (2 * stride) + (idx % stride); const auto tmp = in[elemIdx], tmp2 = in[elemIdx + stride]; out[elemIdx] = tmp + tmp2; out[elemIdx + stride] = tmp - tmp2; } template <typename scalar_t> __global__ void FastWalshHadamardSubKernel(const scalar_t scalar, scalar_t* out) { const auto idx = (threadIdx.x + blockIdx.x * blockDim.x); out[idx] *= scalar; } void fast_walsh_hadamard_transform_cuda_kernel(const int NN, const int halfLL, torch::Tensor in, torch::Tensor out, bool normalize) { // Apply Unnormalized Fast Walsh Hadamard transform int stride = halfLL; float normalizer = 1.0; float sqrt2inv = 0.70710678118654746; while (stride >= 1) { if(stride == halfLL) { AT_DISPATCH_FLOATING_TYPES(in.scalar_type(),"fast_walsh_hadamard_transform_in", ([&] { FastWalshHadamardKernel<scalar_t><<<max(1, halfLL/256), min(256, halfLL)>>>(stride, in.data_ptr<scalar_t>(), out.data_ptr<scalar_t>()); })); } else { AT_DISPATCH_FLOATING_TYPES(in.scalar_type(),"fast_walsh_hadamard_transform_out", ([&] { FastWalshHadamardKernel<scalar_t><<<max(1, halfLL/256), min(256, halfLL)>>>(stride, out.data_ptr<scalar_t>(), out.data_ptr<scalar_t>()); })); } stride /= 2; normalizer *= sqrt2inv; } if(normalize){ AT_DISPATCH_FLOATING_TYPES(in.scalar_type(),"fast_walsh_hadamard_transform_final", ([&] { FastWalshHadamardSubKernel<scalar_t><<<max(1, NN/256), min(256, NN)>>>(normalizer, out.data_ptr<scalar_t>()); })); } }
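The two kernels above perform one butterfly stage and the final scaling of an unnormalized fast Walsh-Hadamard transform. A small CPU reference that applies the same sequence of stages, assuming the length is a power of two, can be useful for checking the GPU output (apply the same normalizer afterwards if 'normalize' is set):

#include <cstddef>
#include <vector>

// CPU reference: combine pairs separated by 'stride', halving the stride each
// pass, exactly as the stride loop in the launcher above does on the GPU.
void fwht_reference(std::vector<float>& v) {
  const std::size_t len = v.size();
  for (std::size_t stride = len / 2; stride >= 1; stride /= 2) {
    for (std::size_t base = 0; base < len; base += 2 * stride) {
      for (std::size_t i = 0; i < stride; i++) {
        const float a = v[base + i];
        const float b = v[base + i + stride];
        v[base + i] = a + b;
        v[base + i + stride] = a - b;
      }
    }
  }
}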
cbbd6b72d6220435c859e49a1e4f99038058ab0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <iostream> #include "gpu-new-forward.h" #include "hip/hip_fp16.h" #define TILE_WIDTH 16 const int constMemSize = (16*4*7*7)/2; //M*C*K*K __constant__ half2 Kc[constMemSize];//filter-bank __global__ void shared_mem_kernel(half *y, const half2 *x, const int B, const int M, const int C, const int H, const int W, const int K) { const int H_out = H - K + 1; const int W_out = W - K + 1; #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] #define x4d(i3, i2, i1, i0) x[((i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0) / 2] #define k4d(i3, i2, i1, i0) Kc[((i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0) / 2] // Insert your GPU convolution kernel code here int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width int n, m, h, w, c, p, q; n = blockIdx.x; //current channel m = blockIdx.y; //current ouput feature map h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x; half2 acc = __floats2half2_rn(0.0f,0.0f); half total; if(h < H_out && w < W_out){ for(c=0; c<C; c++) { int d; for (d = 0; d < K*K - 1; d+=2) { p = d / 7; q = d % 7; half2 x_half = x4d(n, c, h+p, w+q); acc = __hadd2(acc, __hmul2(x4d(n, c, h+p, w+q), k4d(m, c, p, q))); } total = __hadd(acc.x, acc.y); total = __hadd(total, __hmul(x4d(n, c, h+6, w+6).x, k4d(m, c, 6, 6).x)); } y4d(n, m, h, w) = total; } #undef y4d #undef x4d #undef k4d } //acc = __hadd(acc, __hmul(x4d(n, c, h+p, w+q), k4d(m, c, p, q))); __host__ void GPUInterface::conv_forward_gpu(float *host_y, const float *host_x, const float *host_k, const int B, const int M, const int C, const int H, const int W, const int K) { // Declare relevant device pointers half* device_y; half2* device_x; half2* device_k; half2* half_x; half* half_y; half2* half_k; const int y_size = B*M*(H-K+1)*(W-K+1); const int x_size = ceil((1.0*B*C*H*W)/2); const int k_size = ceil((1.0*M*C*K*K)/2); half_y = (half*)malloc( y_size*sizeof(half)); half_x = (half2*)malloc( x_size*sizeof(half2)); half_k = (half2*)malloc( k_size*sizeof(half2)); for (int i = 0; i < x_size; i++) { if (2*i+1 < B*C*H*W) { half_x[i] = __floats2half2_rn(host_x[2*i],host_x[2*i+1]); } else { half_x[i] = __floats2half2_rn(host_x[2*i], 0.0f); } } for (int i = 0; i < k_size; i++) { if (2*i+1 < M*C*K*K) { half_k[i] = __floats2half2_rn(host_k[2*i],host_k[2*i+1]); } else { half_k[i] = __floats2half2_rn(host_k[2*i], 0.0f); } } // Allocate memory and copy over the relevant data structures to the GPU hipMalloc((void**) &device_y, y_size*sizeof(half)); hipMalloc((void**) &device_x, x_size*sizeof(half2)); hipMalloc((void**) &device_k, k_size*sizeof(half2)); std::cout<< "M: "<<M<<"\n"; std::cout<< "C: "<<C<<"\n"; std::cout<< "K: "<<K<<"\n"; std::cout<< "H: "<<H<<"\n"; std::cout<< "W: "<<W<<"\n"; std::cout<< "B: "<<B<<"\n"; get_device_properties(); //copy input to GPU hipMemcpy(device_x, half_x, x_size*sizeof(half2), hipMemcpyHostToDevice); //hipMemcpy(device_k, half_k, M*C*K*K*sizeof(half), hipMemcpyHostToDevice); hipMemcpyToSymbol(Kc, half_k, (7*7*4*16/2)*sizeof(half)); // Set the kernel dimensions and call the kernel int W_out = H - K + 1; //output feature map width int H_out = W - K + 1; //output feature map height int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width int H_grid = ceil(H_out*1.0 / TILE_WIDTH); //# of titls in height int Z = H_grid * W_grid; //total number of tile dim3 blockDim(TILE_WIDTH, 
TILE_WIDTH, 1); //thread block size dim3 gridDim(B, M, Z); //batch_size, # of output feature maps, total number of tiles hipLaunchKernelGGL(( shared_mem_kernel), dim3(gridDim), dim3(blockDim), 0, 0, device_y, device_x, B, M, C, H, W, K); // Copy the output back to host hipMemcpy(half_y, device_y, B*M*(H-K+1)*(W-K+1)*sizeof(half), hipMemcpyDeviceToHost); for (int i = 0; i < B*M*(H-K+1)*(W-K+1); i++) { host_y[i] = __half2float(half_y[i]); } // Free device memory hipFree(device_y); hipFree(device_x); hipFree(device_k); // Useful snippet for error checking // hipError_t error = hipGetLastError(); // if(error != hipSuccess) // { // std::cout<<"CUDA error: "<<hipGetErrorString(error)<<std::endl; // exit(-1); // } free(half_x); free(half_y); free(half_k); } __host__ void GPUInterface::get_device_properties() { int deviceCount; hipGetDeviceCount(&deviceCount); for(int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl; std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl; std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl; std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl; std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl; std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl; std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl; std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl; std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl; } }
cbbd6b72d6220435c859e49a1e4f99038058ab0f.cu
#include <cmath> #include <iostream> #include "gpu-new-forward.h" #include "cuda_fp16.h" #define TILE_WIDTH 16 const int constMemSize = (16*4*7*7)/2; //M*C*K*K __constant__ half2 Kc[constMemSize];//filter-bank __global__ void shared_mem_kernel(half *y, const half2 *x, const int B, const int M, const int C, const int H, const int W, const int K) { const int H_out = H - K + 1; const int W_out = W - K + 1; #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] #define x4d(i3, i2, i1, i0) x[((i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0) / 2] #define k4d(i3, i2, i1, i0) Kc[((i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0) / 2] // Insert your GPU convolution kernel code here int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width int n, m, h, w, c, p, q; n = blockIdx.x; //current channel m = blockIdx.y; //current ouput feature map h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x; half2 acc = __floats2half2_rn(0.0f,0.0f); half total; if(h < H_out && w < W_out){ for(c=0; c<C; c++) { int d; for (d = 0; d < K*K - 1; d+=2) { p = d / 7; q = d % 7; half2 x_half = x4d(n, c, h+p, w+q); acc = __hadd2(acc, __hmul2(x4d(n, c, h+p, w+q), k4d(m, c, p, q))); } total = __hadd(acc.x, acc.y); total = __hadd(total, __hmul(x4d(n, c, h+6, w+6).x, k4d(m, c, 6, 6).x)); } y4d(n, m, h, w) = total; } #undef y4d #undef x4d #undef k4d } //acc = __hadd(acc, __hmul(x4d(n, c, h+p, w+q), k4d(m, c, p, q))); __host__ void GPUInterface::conv_forward_gpu(float *host_y, const float *host_x, const float *host_k, const int B, const int M, const int C, const int H, const int W, const int K) { // Declare relevant device pointers half* device_y; half2* device_x; half2* device_k; half2* half_x; half* half_y; half2* half_k; const int y_size = B*M*(H-K+1)*(W-K+1); const int x_size = ceil((1.0*B*C*H*W)/2); const int k_size = ceil((1.0*M*C*K*K)/2); half_y = (half*)malloc( y_size*sizeof(half)); half_x = (half2*)malloc( x_size*sizeof(half2)); half_k = (half2*)malloc( k_size*sizeof(half2)); for (int i = 0; i < x_size; i++) { if (2*i+1 < B*C*H*W) { half_x[i] = __floats2half2_rn(host_x[2*i],host_x[2*i+1]); } else { half_x[i] = __floats2half2_rn(host_x[2*i], 0.0f); } } for (int i = 0; i < k_size; i++) { if (2*i+1 < M*C*K*K) { half_k[i] = __floats2half2_rn(host_k[2*i],host_k[2*i+1]); } else { half_k[i] = __floats2half2_rn(host_k[2*i], 0.0f); } } // Allocate memory and copy over the relevant data structures to the GPU cudaMalloc((void**) &device_y, y_size*sizeof(half)); cudaMalloc((void**) &device_x, x_size*sizeof(half2)); cudaMalloc((void**) &device_k, k_size*sizeof(half2)); std::cout<< "M: "<<M<<"\n"; std::cout<< "C: "<<C<<"\n"; std::cout<< "K: "<<K<<"\n"; std::cout<< "H: "<<H<<"\n"; std::cout<< "W: "<<W<<"\n"; std::cout<< "B: "<<B<<"\n"; get_device_properties(); //copy input to GPU cudaMemcpy(device_x, half_x, x_size*sizeof(half2), cudaMemcpyHostToDevice); //cudaMemcpy(device_k, half_k, M*C*K*K*sizeof(half), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Kc, half_k, (7*7*4*16/2)*sizeof(half)); // Set the kernel dimensions and call the kernel int W_out = H - K + 1; //output feature map width int H_out = W - K + 1; //output feature map height int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width int H_grid = ceil(H_out*1.0 / TILE_WIDTH); //# of titls in height int Z = H_grid * W_grid; //total number of tile dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1); //thread block size dim3 gridDim(B, M, Z); //batch_size, # of 
output feature maps, total number of tiles shared_mem_kernel<<<gridDim, blockDim>>>(device_y, device_x, B, M, C, H, W, K); // Copy the output back to host cudaMemcpy(half_y, device_y, B*M*(H-K+1)*(W-K+1)*sizeof(half), cudaMemcpyDeviceToHost); for (int i = 0; i < B*M*(H-K+1)*(W-K+1); i++) { host_y[i] = __half2float(half_y[i]); } // Free device memory cudaFree(device_y); cudaFree(device_x); cudaFree(device_k); // Useful snippet for error checking // cudaError_t error = cudaGetLastError(); // if(error != cudaSuccess) // { // std::cout<<"CUDA error: "<<cudaGetErrorString(error)<<std::endl; // exit(-1); // } free(half_x); free(half_y); free(half_k); } __host__ void GPUInterface::get_device_properties() { int deviceCount; cudaGetDeviceCount(&deviceCount); for(int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl; std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl; std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl; std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl; std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl; std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl; std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl; std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl; std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl; } }
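The forward pass above packs pairs of fp32 inputs into half2 and accumulates with __hmul2/__hadd2, treating the odd 49th tap of the 7x7 filter separately. When validating the half-precision output, a plain fp32 host reference for one output element, using the same logical y4d/x4d/k4d index layout, may help (a sketch; the function name is illustrative):

// Reference value of y[n][m][h][w] for a valid (no-padding) convolution.
float conv_ref_element(const float* x, const float* k,
                       int C, int H, int W, int K,
                       int n, int m, int h, int w) {
  float acc = 0.0f;
  for (int c = 0; c < C; c++) {
    for (int p = 0; p < K; p++) {
      for (int q = 0; q < K; q++) {
        acc += x[((n * C + c) * H + (h + p)) * W + (w + q)] *
               k[((m * C + c) * K + p) * K + q];
      }
    }
  }
  return acc;
}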
b1bf979f72e29d6f71b72ce896d2ceddb94b139c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_monte_carlo.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; Real *estimate = NULL; hipMalloc(&estimate, XSIZE*YSIZE); hiprandState_t *states = NULL; hipMalloc(&states, XSIZE*YSIZE); int trials = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_monte_carlo), dim3(gridBlock),dim3(threadBlock), 0, 0, estimate,states,trials); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_monte_carlo), dim3(gridBlock),dim3(threadBlock), 0, 0, estimate,states,trials); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_monte_carlo), dim3(gridBlock),dim3(threadBlock), 0, 0, estimate,states,trials); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b1bf979f72e29d6f71b72ce896d2ceddb94b139c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_monte_carlo.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; Real *estimate = NULL; cudaMalloc(&estimate, XSIZE*YSIZE); curandState *states = NULL; cudaMalloc(&states, XSIZE*YSIZE); int trials = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_monte_carlo<<<gridBlock,threadBlock>>>(estimate,states,trials); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_monte_carlo<<<gridBlock,threadBlock>>>(estimate,states,trials); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_monte_carlo<<<gridBlock,threadBlock>>>(estimate,states,trials); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
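Kernel launches are asynchronous, so reading steady_clock right after the launch loop above mostly measures host-side launch overhead unless the device is synchronized first. A sketch of an event-based timing alternative, reusing the benchmark's own variables (gridBlock, threadBlock, estimate, states, trials):

cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);
for (int i = 0; i < 1000; i++) {
  gpu_monte_carlo<<<gridBlock, threadBlock>>>(estimate, states, trials);
}
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);  // wait for all queued launches to finish
float msecs = 0.0f;
cudaEventElapsedTime(&msecs, ev_start, ev_stop);
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);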
82fbeb7980cef3ec00a69da0bb70d897a84565b6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> /* Q1: Write a CUDA kernels for Matrix Addition: * a) Each row of resultant matrix is computed by one thread * a) Each col of resultant matrix is computed by one thread * a) Each element of resultant matrix is computed by one thread */ __global__ void matAddRow(int *a, int *b, int *sum, int M, int N) { int row = threadIdx.x; if (row >= M) return; int index; for (int j = 0; j < N; j++) { index = row * N + j; sum[index] = a[index] + b[index]; } } __global__ void matAddCol(int *a, int *b, int *sum, int M, int N) { int col = threadIdx.x; if (col >= N) return; int index; for (int i = 0; i < M; i++) { index = i * N + col; sum[index] = a[index] + b[index]; } } __global__ void matAddElement(int *a, int *b, int *sum, int M, int N) { int row = threadIdx.x; int col = threadIdx.y; int index = row * N + col; if (row < M && col < N) sum[index] = a[index] + b[index]; } int main() { int M = 3, N = 3; // host copies of matrices a, b int a[M][N] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; int b[M][N] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; // Separate arrays for the results of the 3 different kernel calls int sum1[M][N]; int sum2[M][N]; int sum3[M][N]; // device copies of variables a, b & sum int *d_a, *d_b, *d_sum; int size = M * N * sizeof(int); // Allocate space for device copies of a, b, sum hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMalloc((void **)&d_sum, size); // Copy inputs to device hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); // Launch kernels on GPU: hipError_t err; // a) A thread for each row hipLaunchKernelGGL(( matAddRow), dim3(1), dim3(M), 0, 0, d_a, d_b, d_sum, M, N); err = hipMemcpy(&sum1, d_sum, size, hipMemcpyDeviceToHost); // a) A thread for each col hipLaunchKernelGGL(( matAddCol), dim3(1), dim3(N), 0, 0, d_a, d_b, d_sum, M, N); err = hipMemcpy(&sum2, d_sum, size, hipMemcpyDeviceToHost); // c) A thread for each element dim3 dimBlock(M, N, 1); hipLaunchKernelGGL(( matAddElement), dim3(1), dim3(dimBlock), 0, 0, d_a, d_b, d_sum, M, N); err = hipMemcpy(&sum3, d_sum, size, hipMemcpyDeviceToHost); if (err != hipSuccess) printf("CUDA error copying to Host: %s\n", hipGetErrorString(err)); int i, j; printf("One thread per row:\n"); for (i = 0; i < M; i++) { for (j = 0; j < N; j++) printf("%d ", sum1[i][j]); printf("\n"); } printf("\nOne thread per col:\n"); for (i = 0; i < M; i++) { for (j = 0; j < N; j++) printf("%d ", sum2[i][j]); printf("\n"); } printf("\nOne thread per element:\n"); for (i = 0; i < M; i++) { for (j = 0; j < N; j++) printf("%d ", sum3[i][j]); printf("\n"); } // Cleanup hipFree(d_a); hipFree(d_b); hipFree(d_sum); return 0; }
82fbeb7980cef3ec00a69da0bb70d897a84565b6.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> /* Q1: Write a CUDA kernels for Matrix Addition: * a) Each row of resultant matrix is computed by one thread * a) Each col of resultant matrix is computed by one thread * a) Each element of resultant matrix is computed by one thread */ __global__ void matAddRow(int *a, int *b, int *sum, int M, int N) { int row = threadIdx.x; if (row >= M) return; int index; for (int j = 0; j < N; j++) { index = row * N + j; sum[index] = a[index] + b[index]; } } __global__ void matAddCol(int *a, int *b, int *sum, int M, int N) { int col = threadIdx.x; if (col >= N) return; int index; for (int i = 0; i < M; i++) { index = i * N + col; sum[index] = a[index] + b[index]; } } __global__ void matAddElement(int *a, int *b, int *sum, int M, int N) { int row = threadIdx.x; int col = threadIdx.y; int index = row * N + col; if (row < M && col < N) sum[index] = a[index] + b[index]; } int main() { int M = 3, N = 3; // host copies of matrices a, b int a[M][N] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; int b[M][N] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; // Separate arrays for the results of the 3 different kernel calls int sum1[M][N]; int sum2[M][N]; int sum3[M][N]; // device copies of variables a, b & sum int *d_a, *d_b, *d_sum; int size = M * N * sizeof(int); // Allocate space for device copies of a, b, sum cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_sum, size); // Copy inputs to device cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); // Launch kernels on GPU: cudaError err; // a) A thread for each row matAddRow<<<1, M>>>(d_a, d_b, d_sum, M, N); err = cudaMemcpy(&sum1, d_sum, size, cudaMemcpyDeviceToHost); // a) A thread for each col matAddCol<<<1, N>>>(d_a, d_b, d_sum, M, N); err = cudaMemcpy(&sum2, d_sum, size, cudaMemcpyDeviceToHost); // c) A thread for each element dim3 dimBlock(M, N, 1); matAddElement<<<1, dimBlock>>>(d_a, d_b, d_sum, M, N); err = cudaMemcpy(&sum3, d_sum, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) printf("CUDA error copying to Host: %s\n", cudaGetErrorString(err)); int i, j; printf("One thread per row:\n"); for (i = 0; i < M; i++) { for (j = 0; j < N; j++) printf("%d ", sum1[i][j]); printf("\n"); } printf("\nOne thread per col:\n"); for (i = 0; i < M; i++) { for (j = 0; j < N; j++) printf("%d ", sum2[i][j]); printf("\n"); } printf("\nOne thread per element:\n"); for (i = 0; i < M; i++) { for (j = 0; j < N; j++) printf("%d ", sum3[i][j]); printf("\n"); } // Cleanup cudaFree(d_a); cudaFree(d_b); cudaFree(d_sum); return 0; }
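All three kernels above index with threadIdx only, so they are correct only while the whole matrix fits in a single block. A sketch of a block-scalable variant of the per-element kernel (illustrative, not part of the original exercise):

__global__ void matAddElementTiled(const int *a, const int *b, int *sum,
                                   int M, int N) {
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row < M && col < N) {
    int index = row * N + col;
    sum[index] = a[index] + b[index];
  }
}
// Example launch: dim3 block(16, 16); dim3 grid((N + 15) / 16, (M + 15) / 16);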
77c44f8ca5ba5ce2e8a0c17dda87c7ab712b21b7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "average_subsampling_2d_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../average_subsampling_layer.h" texture<float, hipTextureType1D, hipReadModeElementType> input_tex_ref; __global__ void average_subsampling_2d_tex_upd_kernel( float * __restrict output, int subsampling_width, int subsampling_height, float subsampling_weight, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count) { int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int tt = 32 - __clz(output_width - 1); int output_y = elem_id_in_feature_map >> tt; int output_x = elem_id_in_feature_map & ((1 << tt) - 1); bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count); if (in_bounds) { int input_x = output_x * subsampling_width; int input_y = output_y * subsampling_height; int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; float sum = 0.0F; for(int j = 0; j < subsampling_height; ++j) { #pragma unroll 4 for(int i = 0; i < subsampling_width; ++i) { sum += tex1Dfetch(input_tex_ref, current_input_elem_id); current_input_elem_id++; } current_input_elem_id += (input_width - subsampling_width); } output[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] = sum * subsampling_weight; } } template<int SUBSAMPLING_WIDTH, int SUBSAMPLING_HEIGHT> __global__ void average_subsampling_2d_tex_exact_upd_kernel( float * __restrict output, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count) { int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int tt = 32 - __clz(output_width - 1); int output_y = elem_id_in_feature_map >> tt; int output_x = elem_id_in_feature_map & ((1 << tt) - 1); bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count); if (in_bounds) { int input_x = output_x * SUBSAMPLING_WIDTH; int input_y = output_y * SUBSAMPLING_HEIGHT; int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; float sum = 0.0F; #pragma unroll for(int j = 0; j < SUBSAMPLING_HEIGHT; ++j) { #pragma unroll for(int i = 0; i < SUBSAMPLING_WIDTH; ++i) { sum += tex1Dfetch(input_tex_ref, current_input_elem_id); current_input_elem_id++; } current_input_elem_id += (input_width - SUBSAMPLING_WIDTH); } output[((entry_id * 
feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] = sum * (1.0F / (float)(SUBSAMPLING_WIDTH * SUBSAMPLING_HEIGHT)); } } extern __shared__ float arr[]; __global__ void average_subsampling_2d_deriviative_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, int subsampling_width, int subsampling_height, float subsampling_weight, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count) { int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int tt = 32 - __clz(output_width - 1); int output_y = elem_id_in_feature_map >> tt; int output_x = elem_id_in_feature_map & ((1 << tt) - 1); int threadblock_size = blockDim.x * blockDim.y * blockDim.z; int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x; int * offsets = (int *)arr; float * vals = (float *)(arr + threadblock_size * subsampling_width); bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count); int input_x = output_x * subsampling_width; int input_y = output_y * subsampling_height; int current_input_errors_elem_id; float error; if (in_bounds) { error = output_errors[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] * subsampling_weight; current_input_errors_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; } for(int j = 0; j < subsampling_height; ++j) { int local_id = thread_id * subsampling_width; for(int i = 0; i < subsampling_width; ++i) { offsets[local_id] = in_bounds ? 
current_input_errors_elem_id : -1; if (in_bounds) vals[local_id] = error; current_input_errors_elem_id++; local_id++; } __syncthreads(); local_id = thread_id; for(int i = 0; i < subsampling_width; ++i) { int offset = offsets[local_id]; float val = vals[local_id]; if (offset >= 0) input_errors[offset] = val; local_id += threadblock_size; } current_input_errors_elem_id += (input_width - subsampling_width); __syncthreads(); } } template<int SUBSAMPLING_WIDTH, int SUBSAMPLING_HEIGHT> __global__ void average_subsampling_2d_deriviative_exact_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count) { int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int tt = 32 - __clz(output_width - 1); int output_y = elem_id_in_feature_map >> tt; int output_x = elem_id_in_feature_map & ((1 << tt) - 1); int threadblock_size = blockDim.x * blockDim.y * blockDim.z; int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x; int * offsets = (int *)arr; float * vals = (float *)(arr + threadblock_size * SUBSAMPLING_WIDTH); bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count); int input_x = output_x * SUBSAMPLING_WIDTH; int input_y = output_y * SUBSAMPLING_HEIGHT; int current_input_errors_elem_id; float error; if (in_bounds) { error = output_errors[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] * (1.0F / (float)(SUBSAMPLING_WIDTH * SUBSAMPLING_HEIGHT)); current_input_errors_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; } #pragma unroll for(int j = 0; j < SUBSAMPLING_HEIGHT; ++j) { int local_id = thread_id * SUBSAMPLING_WIDTH; #pragma unroll for(int i = 0; i < SUBSAMPLING_WIDTH; ++i) { offsets[local_id] = in_bounds ? 
current_input_errors_elem_id : -1; if (in_bounds) vals[local_id] = error; current_input_errors_elem_id++; local_id++; } __syncthreads(); local_id = thread_id; #pragma unroll for(int i = 0; i < SUBSAMPLING_WIDTH; ++i) { int offset = offsets[local_id]; float val = vals[local_id]; if (offset >= 0) input_errors[offset] = val; local_id += threadblock_size; } current_input_errors_elem_id += (input_width - SUBSAMPLING_WIDTH); if (j < (SUBSAMPLING_HEIGHT - 1)) __syncthreads(); } } namespace nnforge { namespace cuda { average_subsampling_2d_layer_updater_cuda::average_subsampling_2d_layer_updater_cuda() { input_tex_ref.addressMode[0] = hipAddressModeBorder; input_tex_ref.normalized = false; } average_subsampling_2d_layer_updater_cuda::~average_subsampling_2d_layer_updater_cuda() { } #define MAX_WINDOW_WIDTH 4 #define MAX_WINDOW_HEIGHT 4 #define launch_exact_kernel_const_const(window_width_const, window_height_const) \ hipLaunchKernelGGL(( average_subsampling_2d_tex_exact_upd_kernel<window_width_const,window_height_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer,input_configuration_specific.dimension_sizes[0],input_configuration_specific.dimension_sizes[1],output_configuration_specific.dimension_sizes[0],output_configuration_specific.dimension_sizes[1],output_configuration_specific.feature_map_count,entry_count); #define launch_exact_kernel_const(window_width, window_height_const) \ switch (window_width) \ { \ case 1: \ launch_exact_kernel_const_const(1, window_height_const); \ break; \ case 2: \ launch_exact_kernel_const_const(2, window_height_const); \ break; \ case 3: \ launch_exact_kernel_const_const(3, window_height_const); \ break; \ case 4: \ launch_exact_kernel_const_const(4, window_height_const); \ break; \ }; #define launch_exact_kernel(window_width, window_height) \ switch (window_height) \ { \ case 1: \ launch_exact_kernel_const(window_width, 1); \ break; \ case 2: \ launch_exact_kernel_const(window_width, 2); \ break; \ case 3: \ launch_exact_kernel_const(window_width, 3); \ break; \ case 4: \ launch_exact_kernel_const(window_width, 4); \ break; \ }; #define launch_backprop_exact_kernel_const_const(window_width_const, window_height_const) \ hipLaunchKernelGGL(( average_subsampling_2d_deriviative_exact_upd_kernel<window_width_const,window_height_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id, *input_errors_buffer,*output_errors_buffer,input_configuration_specific.dimension_sizes[0],input_configuration_specific.dimension_sizes[1],output_configuration_specific.dimension_sizes[0],output_configuration_specific.dimension_sizes[1],output_configuration_specific.feature_map_count,entry_count); #define launch_backprop_exact_kernel_const(window_width, window_height_const) \ switch (window_width) \ { \ case 1: \ launch_backprop_exact_kernel_const_const(1, window_height_const); \ break; \ case 2: \ launch_backprop_exact_kernel_const_const(2, window_height_const); \ break; \ case 3: \ launch_backprop_exact_kernel_const_const(3, window_height_const); \ break; \ case 4: \ launch_backprop_exact_kernel_const_const(4, window_height_const); \ break; \ }; #define launch_backprop_exact_kernel(window_width, window_height) \ switch (window_height) \ { \ case 1: \ launch_backprop_exact_kernel_const(window_width, 1); \ break; \ case 2: \ launch_backprop_exact_kernel_const(window_width, 2); \ break; \ case 3: \ launch_backprop_exact_kernel_const(window_width, 3); \ break; \ case 4: \ launch_backprop_exact_kernel_const(window_width, 
4); \ break; \ }; void average_subsampling_2d_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); cuda_safe_call(hipBindTexture(0, input_tex_ref, *input_neurons_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float))); int output_elem_count_per_feature_map_aligned = cuda_util::get_power2_aligned_size(output_configuration_specific.dimension_sizes[0]) * output_configuration_specific.dimension_sizes[1]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map_aligned, output_configuration_specific.feature_map_count, entry_count); if ((subsampling_sizes[0] <= MAX_WINDOW_WIDTH) && (subsampling_sizes[1] <= MAX_WINDOW_HEIGHT)) { launch_exact_kernel(subsampling_sizes[0], subsampling_sizes[1]); } else { hipLaunchKernelGGL(( average_subsampling_2d_tex_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, subsampling_sizes[0], subsampling_sizes[1], subsampling_weight, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.feature_map_count, entry_count); } } void average_subsampling_2d_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { int output_elem_count_per_feature_map_aligned = cuda_util::get_power2_aligned_size(output_configuration_specific.dimension_sizes[0]) * output_configuration_specific.dimension_sizes[1]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map_aligned, output_configuration_specific.feature_map_count, entry_count); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = threadblock_size * subsampling_sizes[0] * 2 * sizeof(float); if ((subsampling_sizes[0] <= MAX_WINDOW_WIDTH) && (subsampling_sizes[1] <= MAX_WINDOW_HEIGHT)) { launch_backprop_exact_kernel(subsampling_sizes[0], subsampling_sizes[1]); } else { hipLaunchKernelGGL(( average_subsampling_2d_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id, *input_errors_buffer, *output_errors_buffer, subsampling_sizes[0], subsampling_sizes[1], subsampling_weight, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], 
output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.feature_map_count, entry_count); } } std::vector<unsigned int> average_subsampling_2d_layer_updater_cuda::get_linear_addressing_through_texture_per_entry() const { std::vector<unsigned int> res; res.push_back(input_elem_count_per_entry); return res; } void average_subsampling_2d_layer_updater_cuda::updater_configured() { if (!different_input) throw neural_network_exception("average_subsampling_2d_layer_updater_cuda is not able to run using the same input"); std::tr1::shared_ptr<const average_subsampling_layer> layer_derived = std::tr1::dynamic_pointer_cast<const average_subsampling_layer>(layer_schema); subsampling_sizes = layer_derived->subsampling_sizes; subsampling_weight = 1.0F / static_cast<float>(subsampling_sizes[0] * subsampling_sizes[1]); } bool average_subsampling_2d_layer_updater_cuda::is_in_place_backprop() const { return false; } } }
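In the generic (non-templated) forward kernel above, each output pixel averages a subsampling_width x subsampling_height window of the input and scales by subsampling_weight = 1 / (width * height). A small host reference for one output element of a single feature map (a sketch; batch and feature-map indexing omitted):

// Host reference: average one pooling window of a single (H x W) feature map.
float avg_subsample_ref(const float* in, int in_w,
                        int sub_w, int sub_h, int out_x, int out_y) {
  float sum = 0.0f;
  for (int j = 0; j < sub_h; j++) {
    for (int i = 0; i < sub_w; i++) {
      sum += in[(out_y * sub_h + j) * in_w + (out_x * sub_w + i)];
    }
  }
  return sum / (float)(sub_w * sub_h);
}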
77c44f8ca5ba5ce2e8a0c17dda87c7ab712b21b7.cu
/* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "average_subsampling_2d_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../average_subsampling_layer.h" texture<float, cudaTextureType1D, cudaReadModeElementType> input_tex_ref; __global__ void average_subsampling_2d_tex_upd_kernel( float * __restrict output, int subsampling_width, int subsampling_height, float subsampling_weight, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count) { int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int tt = 32 - __clz(output_width - 1); int output_y = elem_id_in_feature_map >> tt; int output_x = elem_id_in_feature_map & ((1 << tt) - 1); bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count); if (in_bounds) { int input_x = output_x * subsampling_width; int input_y = output_y * subsampling_height; int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; float sum = 0.0F; for(int j = 0; j < subsampling_height; ++j) { #pragma unroll 4 for(int i = 0; i < subsampling_width; ++i) { sum += tex1Dfetch(input_tex_ref, current_input_elem_id); current_input_elem_id++; } current_input_elem_id += (input_width - subsampling_width); } output[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] = sum * subsampling_weight; } } template<int SUBSAMPLING_WIDTH, int SUBSAMPLING_HEIGHT> __global__ void average_subsampling_2d_tex_exact_upd_kernel( float * __restrict output, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count) { int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int tt = 32 - __clz(output_width - 1); int output_y = elem_id_in_feature_map >> tt; int output_x = elem_id_in_feature_map & ((1 << tt) - 1); bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count); if (in_bounds) { int input_x = output_x * SUBSAMPLING_WIDTH; int input_y = output_y * SUBSAMPLING_HEIGHT; int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; float sum = 0.0F; #pragma unroll for(int j = 0; j < SUBSAMPLING_HEIGHT; ++j) { #pragma unroll for(int i = 0; i < SUBSAMPLING_WIDTH; ++i) { sum += tex1Dfetch(input_tex_ref, current_input_elem_id); current_input_elem_id++; } current_input_elem_id += (input_width - SUBSAMPLING_WIDTH); } output[((entry_id * feature_map_count + feature_map_id) * output_height + 
output_y) * output_width + output_x] = sum * (1.0F / (float)(SUBSAMPLING_WIDTH * SUBSAMPLING_HEIGHT)); } } extern __shared__ float arr[]; __global__ void average_subsampling_2d_deriviative_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, int subsampling_width, int subsampling_height, float subsampling_weight, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count) { int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int tt = 32 - __clz(output_width - 1); int output_y = elem_id_in_feature_map >> tt; int output_x = elem_id_in_feature_map & ((1 << tt) - 1); int threadblock_size = blockDim.x * blockDim.y * blockDim.z; int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x; int * offsets = (int *)arr; float * vals = (float *)(arr + threadblock_size * subsampling_width); bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count); int input_x = output_x * subsampling_width; int input_y = output_y * subsampling_height; int current_input_errors_elem_id; float error; if (in_bounds) { error = output_errors[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] * subsampling_weight; current_input_errors_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; } for(int j = 0; j < subsampling_height; ++j) { int local_id = thread_id * subsampling_width; for(int i = 0; i < subsampling_width; ++i) { offsets[local_id] = in_bounds ? current_input_errors_elem_id : -1; if (in_bounds) vals[local_id] = error; current_input_errors_elem_id++; local_id++; } __syncthreads(); local_id = thread_id; for(int i = 0; i < subsampling_width; ++i) { int offset = offsets[local_id]; float val = vals[local_id]; if (offset >= 0) input_errors[offset] = val; local_id += threadblock_size; } current_input_errors_elem_id += (input_width - subsampling_width); __syncthreads(); } } template<int SUBSAMPLING_WIDTH, int SUBSAMPLING_HEIGHT> __global__ void average_subsampling_2d_deriviative_exact_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count) { int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int tt = 32 - __clz(output_width - 1); int output_y = elem_id_in_feature_map >> tt; int output_x = elem_id_in_feature_map & ((1 << tt) - 1); int threadblock_size = blockDim.x * blockDim.y * blockDim.z; int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x; int * offsets = (int *)arr; float * vals = (float *)(arr + threadblock_size * SUBSAMPLING_WIDTH); bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count); int input_x = output_x * SUBSAMPLING_WIDTH; int input_y = output_y * SUBSAMPLING_HEIGHT; int current_input_errors_elem_id; float error; if (in_bounds) { error = output_errors[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] * (1.0F / (float)(SUBSAMPLING_WIDTH * 
SUBSAMPLING_HEIGHT)); current_input_errors_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; } #pragma unroll for(int j = 0; j < SUBSAMPLING_HEIGHT; ++j) { int local_id = thread_id * SUBSAMPLING_WIDTH; #pragma unroll for(int i = 0; i < SUBSAMPLING_WIDTH; ++i) { offsets[local_id] = in_bounds ? current_input_errors_elem_id : -1; if (in_bounds) vals[local_id] = error; current_input_errors_elem_id++; local_id++; } __syncthreads(); local_id = thread_id; #pragma unroll for(int i = 0; i < SUBSAMPLING_WIDTH; ++i) { int offset = offsets[local_id]; float val = vals[local_id]; if (offset >= 0) input_errors[offset] = val; local_id += threadblock_size; } current_input_errors_elem_id += (input_width - SUBSAMPLING_WIDTH); if (j < (SUBSAMPLING_HEIGHT - 1)) __syncthreads(); } } namespace nnforge { namespace cuda { average_subsampling_2d_layer_updater_cuda::average_subsampling_2d_layer_updater_cuda() { input_tex_ref.addressMode[0] = cudaAddressModeBorder; input_tex_ref.normalized = false; } average_subsampling_2d_layer_updater_cuda::~average_subsampling_2d_layer_updater_cuda() { } #define MAX_WINDOW_WIDTH 4 #define MAX_WINDOW_HEIGHT 4 #define launch_exact_kernel_const_const(window_width_const, window_height_const) \ average_subsampling_2d_tex_exact_upd_kernel<window_width_const,window_height_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer,input_configuration_specific.dimension_sizes[0],input_configuration_specific.dimension_sizes[1],output_configuration_specific.dimension_sizes[0],output_configuration_specific.dimension_sizes[1],output_configuration_specific.feature_map_count,entry_count); #define launch_exact_kernel_const(window_width, window_height_const) \ switch (window_width) \ { \ case 1: \ launch_exact_kernel_const_const(1, window_height_const); \ break; \ case 2: \ launch_exact_kernel_const_const(2, window_height_const); \ break; \ case 3: \ launch_exact_kernel_const_const(3, window_height_const); \ break; \ case 4: \ launch_exact_kernel_const_const(4, window_height_const); \ break; \ }; #define launch_exact_kernel(window_width, window_height) \ switch (window_height) \ { \ case 1: \ launch_exact_kernel_const(window_width, 1); \ break; \ case 2: \ launch_exact_kernel_const(window_width, 2); \ break; \ case 3: \ launch_exact_kernel_const(window_width, 3); \ break; \ case 4: \ launch_exact_kernel_const(window_width, 4); \ break; \ }; #define launch_backprop_exact_kernel_const_const(window_width_const, window_height_const) \ average_subsampling_2d_deriviative_exact_upd_kernel<window_width_const,window_height_const><<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(*input_errors_buffer,*output_errors_buffer,input_configuration_specific.dimension_sizes[0],input_configuration_specific.dimension_sizes[1],output_configuration_specific.dimension_sizes[0],output_configuration_specific.dimension_sizes[1],output_configuration_specific.feature_map_count,entry_count); #define launch_backprop_exact_kernel_const(window_width, window_height_const) \ switch (window_width) \ { \ case 1: \ launch_backprop_exact_kernel_const_const(1, window_height_const); \ break; \ case 2: \ launch_backprop_exact_kernel_const_const(2, window_height_const); \ break; \ case 3: \ launch_backprop_exact_kernel_const_const(3, window_height_const); \ break; \ case 4: \ launch_backprop_exact_kernel_const_const(4, window_height_const); \ break; \ }; #define launch_backprop_exact_kernel(window_width, window_height) \ switch 
(window_height) \ { \ case 1: \ launch_backprop_exact_kernel_const(window_width, 1); \ break; \ case 2: \ launch_backprop_exact_kernel_const(window_width, 2); \ break; \ case 3: \ launch_backprop_exact_kernel_const(window_width, 3); \ break; \ case 4: \ launch_backprop_exact_kernel_const(window_width, 4); \ break; \ }; void average_subsampling_2d_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cuda_safe_call(cudaBindTexture(0, input_tex_ref, *input_neurons_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float))); int output_elem_count_per_feature_map_aligned = cuda_util::get_power2_aligned_size(output_configuration_specific.dimension_sizes[0]) * output_configuration_specific.dimension_sizes[1]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map_aligned, output_configuration_specific.feature_map_count, entry_count); if ((subsampling_sizes[0] <= MAX_WINDOW_WIDTH) && (subsampling_sizes[1] <= MAX_WINDOW_HEIGHT)) { launch_exact_kernel(subsampling_sizes[0], subsampling_sizes[1]); } else { average_subsampling_2d_tex_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_neurons_buffer, subsampling_sizes[0], subsampling_sizes[1], subsampling_weight, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.feature_map_count, entry_count); } } void average_subsampling_2d_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { int output_elem_count_per_feature_map_aligned = cuda_util::get_power2_aligned_size(output_configuration_specific.dimension_sizes[0]) * output_configuration_specific.dimension_sizes[1]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map_aligned, output_configuration_specific.feature_map_count, entry_count); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = threadblock_size * subsampling_sizes[0] * 2 * sizeof(float); if ((subsampling_sizes[0] <= MAX_WINDOW_WIDTH) && (subsampling_sizes[1] <= MAX_WINDOW_HEIGHT)) { launch_backprop_exact_kernel(subsampling_sizes[0], subsampling_sizes[1]); } else { average_subsampling_2d_deriviative_upd_kernel<<<kernel_dims.first, 
kernel_dims.second, smem_size, stream_id>>>( *input_errors_buffer, *output_errors_buffer, subsampling_sizes[0], subsampling_sizes[1], subsampling_weight, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.feature_map_count, entry_count); } } std::vector<unsigned int> average_subsampling_2d_layer_updater_cuda::get_linear_addressing_through_texture_per_entry() const { std::vector<unsigned int> res; res.push_back(input_elem_count_per_entry); return res; } void average_subsampling_2d_layer_updater_cuda::updater_configured() { if (!different_input) throw neural_network_exception("average_subsampling_2d_layer_updater_cuda is not able to run using the same input"); std::tr1::shared_ptr<const average_subsampling_layer> layer_derived = std::tr1::dynamic_pointer_cast<const average_subsampling_layer>(layer_schema); subsampling_sizes = layer_derived->subsampling_sizes; subsampling_weight = 1.0F / static_cast<float>(subsampling_sizes[0] * subsampling_sizes[1]); } bool average_subsampling_2d_layer_updater_cuda::is_in_place_backprop() const { return false; } } }
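The kernels above avoid an integer division per thread by padding each feature map's width to a power of two (output_elem_count_per_feature_map_aligned) and recovering (output_x, output_y) from the flat element id with a shift and a mask. A minimal host-side sketch of that decomposition, added here for illustration only (the example sizes are assumptions, not taken from the file above):

#include <cassert>
#include <cstdio>

int main() {
    const int output_width = 13, output_height = 7;   // arbitrary example feature-map size

    // Host equivalent of the kernel's "tt = 32 - __clz(output_width - 1)":
    // the smallest tt with (1 << tt) >= output_width.
    int tt = 0;
    while ((1 << tt) < output_width) ++tt;
    const int aligned_width = 1 << tt;                 // what cuda_util::get_power2_aligned_size pads the width to

    for (int y = 0; y < output_height; ++y) {
        for (int x = 0; x < output_width; ++x) {
            int elem_id = y * aligned_width + x;       // flat id over the padded feature map
            int rec_y = elem_id >> tt;                 // shift instead of division by the width
            int rec_x = elem_id & ((1 << tt) - 1);     // mask instead of modulo
            assert(rec_x == x && rec_y == y);
        }
    }
    printf("shift/mask decomposition holds for %dx%d (padded width %d)\n",
           output_width, output_height, aligned_width);
    return 0;
}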
a8c71fbc342829237f865fc5b62c2b3e21740e58.hip
// !!! This is a file automatically generated by hipify!!! #include <pybind11/numpy.h> #include <pybind11/pybind11.h> #include "../model/decoder.h" #include "../model/encoder.h" #include "../proto/transformer_weight.h" #include "../tools/util.h" namespace py = pybind11; #ifdef FP16_MODE const lightseq::cuda::OperationType decoder_optype = lightseq::cuda::OperationType::FP16; #else const lightseq::cuda::OperationType decoder_optype = lightseq::cuda::OperationType::FP32; #endif namespace lightseq { namespace cuda { class TransformerDecoder { private: typedef lightseq::cuda::OperationTypeTraits<decoder_optype> optraits; lightseq::cuda::Decoder<decoder_optype> *decoder_; optraits::DataType *d_encoder_output_; int *d_output_; int *d_padding_mask_; int _max_batch_size; hipStream_t stream_; hipblasHandle_t hd_; lightseq::cuda::TransformerWeight<decoder_optype> tw_; public: TransformerDecoder(const std::string weight_path, const int max_batch_size) : stream_(nullptr), hd_(nullptr), decoder_(nullptr) { /* ---step1. init environment--- */ _max_batch_size = max_batch_size; hipError_t cuerr = hipSetDevice(0); if (cuerr != hipSuccess) { throw std::runtime_error(hipGetErrorString(cuerr)); } cuerr = hipStreamCreate(&stream_); if (cuerr != hipSuccess) { throw std::runtime_error(hipGetErrorString(cuerr)); } hipblasStatus_t cublaserr = hipblasCreate(&hd_); if (cublaserr != HIPBLAS_STATUS_SUCCESS) { throw std::runtime_error("Failed to creat cublas handle "); } cublaserr = hipblasSetStream(hd_, stream_); if (cublaserr != HIPBLAS_STATUS_SUCCESS) { throw std::runtime_error("Failed to set stream for cublas handle"); } /* ---step2. load model weights into GPU memory--- */ // saved in custom proto file std::string model_weights_path = weight_path; std::string res = tw_.initializing(model_weights_path, true); if (!res.empty()) { throw std::runtime_error(res); } if (tw_._sampling_method == "topk" || tw_._sampling_method == "topp") { tw_._beam_size = 1; } tw_.print_model_config(); /* step3. instantiate encoder and decoder, init the gpu memory buffer. 
using thrust vector to avoid manage gpu memory by hand */ // instantiate encoder // FIXME: padding mask should be passed from user // thrust::device_vector<int> d_padding_mask_ = // std::vector<int>(_max_batch_size * tw_._max_step, 0); lightseq::cuda::CHECK_GPU_ERROR(hipMalloc( &d_padding_mask_, _max_batch_size * tw_._max_step * sizeof(int))); lightseq::cuda::CHECK_GPU_ERROR(hipMalloc( &d_encoder_output_, _max_batch_size * tw_._max_step * tw_._hidden_size * sizeof(optraits::DataType))); lightseq::cuda::CHECK_GPU_ERROR(hipMalloc( &d_output_, _max_batch_size * tw_._beam_size * tw_._max_step * sizeof(int))); decoder_ = new lightseq::cuda::Decoder<decoder_optype>( _max_batch_size, d_padding_mask_, d_encoder_output_, d_output_, tw_, stream_, hd_, true); res = decoder_->check(); if (!res.empty()) { throw std::runtime_error(res); } long buf_bytesize = decoder_->compute_buffer_bytesize(); std::cout << "Allocated " << buf_bytesize / (1024 * 1024) << "MB GPU buffer for transformer decoder" << std::endl; void *d_buf_; // encoder and decoder use the same buffer to save gpu memory useage lightseq::cuda::CHECK_GPU_ERROR( hipMalloc((void **)&d_buf_, (size_t)buf_bytesize)); decoder_->init_buffer(d_buf_); cuerr = hipStreamSynchronize(stream_); if (cuerr != hipSuccess) { std::cout << "failed to init GPU for transformer: " << std::endl; std::runtime_error(std::string(hipGetErrorString(cuerr))); } } py::array_t<int> infer( py::array_t<float, py::array::c_style | py::array::forcecast> encoder_output, py::array_t<int, py::array::c_style | py::array::forcecast> encoder_mask) { auto encoder_out = encoder_output.mutable_unchecked<3>(); auto encoder_mask_out = encoder_mask.mutable_unchecked<2>(); const float *encoder_output_data = encoder_out.data(0, 0, 0); const int *encoder_mask_data = encoder_mask_out.data(0, 0); std::vector<optraits::DataType> h_encoder_out(encoder_out.size()); for (auto i = 0; i < encoder_out.size(); i++) { optraits::DataType data; if (decoder_optype == lightseq::cuda::OperationType::FP16) { data = __float2half_rn(encoder_output_data[i]); } else { data = encoder_output_data[i]; } h_encoder_out[i] = data; } lightseq::cuda::CHECK_GPU_ERROR( hipMemcpyAsync(d_encoder_output_, h_encoder_out.data(), sizeof(optraits::DataType) * encoder_out.size(), hipMemcpyHostToDevice, stream_)); lightseq::cuda::CHECK_GPU_ERROR( hipMemcpyAsync(d_padding_mask_, encoder_mask_data, sizeof(int) * encoder_mask_out.size(), hipMemcpyHostToDevice, stream_)); int batch_size = encoder_out.shape(0); int batch_seq_len = encoder_out.shape(1); decoder_->run_one_infer(batch_size, batch_seq_len); int tokens_size = decoder_->_cur_step + 1; int beam_size = tw_._beam_size; auto tokens = py::array_t<int>({batch_size, beam_size, tokens_size}); int *tokens_data = tokens.mutable_data(0, 0); lightseq::cuda::CHECK_GPU_ERROR(hipMemcpy(tokens_data, d_output_, sizeof(int) * tokens.size(), hipMemcpyDeviceToHost)); return tokens; } }; } // namespace cuda } // namespace lightseq
a8c71fbc342829237f865fc5b62c2b3e21740e58.cu
#include <pybind11/numpy.h> #include <pybind11/pybind11.h> #include "../model/decoder.h" #include "../model/encoder.h" #include "../proto/transformer_weight.h" #include "../tools/util.h" namespace py = pybind11; #ifdef FP16_MODE const lightseq::cuda::OperationType decoder_optype = lightseq::cuda::OperationType::FP16; #else const lightseq::cuda::OperationType decoder_optype = lightseq::cuda::OperationType::FP32; #endif namespace lightseq { namespace cuda { class TransformerDecoder { private: typedef lightseq::cuda::OperationTypeTraits<decoder_optype> optraits; lightseq::cuda::Decoder<decoder_optype> *decoder_; optraits::DataType *d_encoder_output_; int *d_output_; int *d_padding_mask_; int _max_batch_size; cudaStream_t stream_; cublasHandle_t hd_; lightseq::cuda::TransformerWeight<decoder_optype> tw_; public: TransformerDecoder(const std::string weight_path, const int max_batch_size) : stream_(nullptr), hd_(nullptr), decoder_(nullptr) { /* ---step1. init environment--- */ _max_batch_size = max_batch_size; cudaError_t cuerr = cudaSetDevice(0); if (cuerr != cudaSuccess) { throw std::runtime_error(cudaGetErrorString(cuerr)); } cuerr = cudaStreamCreate(&stream_); if (cuerr != cudaSuccess) { throw std::runtime_error(cudaGetErrorString(cuerr)); } cublasStatus_t cublaserr = cublasCreate(&hd_); if (cublaserr != CUBLAS_STATUS_SUCCESS) { throw std::runtime_error("Failed to creat cublas handle "); } cublaserr = cublasSetStream(hd_, stream_); if (cublaserr != CUBLAS_STATUS_SUCCESS) { throw std::runtime_error("Failed to set stream for cublas handle"); } /* ---step2. load model weights into GPU memory--- */ // saved in custom proto file std::string model_weights_path = weight_path; std::string res = tw_.initializing(model_weights_path, true); if (!res.empty()) { throw std::runtime_error(res); } if (tw_._sampling_method == "topk" || tw_._sampling_method == "topp") { tw_._beam_size = 1; } tw_.print_model_config(); /* step3. instantiate encoder and decoder, init the gpu memory buffer. 
using thrust vector to avoid manage gpu memory by hand */ // instantiate encoder // FIXME: padding mask should be passed from user // thrust::device_vector<int> d_padding_mask_ = // std::vector<int>(_max_batch_size * tw_._max_step, 0); lightseq::cuda::CHECK_GPU_ERROR(cudaMalloc( &d_padding_mask_, _max_batch_size * tw_._max_step * sizeof(int))); lightseq::cuda::CHECK_GPU_ERROR(cudaMalloc( &d_encoder_output_, _max_batch_size * tw_._max_step * tw_._hidden_size * sizeof(optraits::DataType))); lightseq::cuda::CHECK_GPU_ERROR(cudaMalloc( &d_output_, _max_batch_size * tw_._beam_size * tw_._max_step * sizeof(int))); decoder_ = new lightseq::cuda::Decoder<decoder_optype>( _max_batch_size, d_padding_mask_, d_encoder_output_, d_output_, tw_, stream_, hd_, true); res = decoder_->check(); if (!res.empty()) { throw std::runtime_error(res); } long buf_bytesize = decoder_->compute_buffer_bytesize(); std::cout << "Allocated " << buf_bytesize / (1024 * 1024) << "MB GPU buffer for transformer decoder" << std::endl; void *d_buf_; // encoder and decoder use the same buffer to save gpu memory useage lightseq::cuda::CHECK_GPU_ERROR( cudaMalloc((void **)&d_buf_, (size_t)buf_bytesize)); decoder_->init_buffer(d_buf_); cuerr = cudaStreamSynchronize(stream_); if (cuerr != cudaSuccess) { std::cout << "failed to init GPU for transformer: " << std::endl; std::runtime_error(std::string(cudaGetErrorString(cuerr))); } } py::array_t<int> infer( py::array_t<float, py::array::c_style | py::array::forcecast> encoder_output, py::array_t<int, py::array::c_style | py::array::forcecast> encoder_mask) { auto encoder_out = encoder_output.mutable_unchecked<3>(); auto encoder_mask_out = encoder_mask.mutable_unchecked<2>(); const float *encoder_output_data = encoder_out.data(0, 0, 0); const int *encoder_mask_data = encoder_mask_out.data(0, 0); std::vector<optraits::DataType> h_encoder_out(encoder_out.size()); for (auto i = 0; i < encoder_out.size(); i++) { optraits::DataType data; if (decoder_optype == lightseq::cuda::OperationType::FP16) { data = __float2half_rn(encoder_output_data[i]); } else { data = encoder_output_data[i]; } h_encoder_out[i] = data; } lightseq::cuda::CHECK_GPU_ERROR( cudaMemcpyAsync(d_encoder_output_, h_encoder_out.data(), sizeof(optraits::DataType) * encoder_out.size(), cudaMemcpyHostToDevice, stream_)); lightseq::cuda::CHECK_GPU_ERROR( cudaMemcpyAsync(d_padding_mask_, encoder_mask_data, sizeof(int) * encoder_mask_out.size(), cudaMemcpyHostToDevice, stream_)); int batch_size = encoder_out.shape(0); int batch_seq_len = encoder_out.shape(1); decoder_->run_one_infer(batch_size, batch_seq_len); int tokens_size = decoder_->_cur_step + 1; int beam_size = tw_._beam_size; auto tokens = py::array_t<int>({batch_size, beam_size, tokens_size}); int *tokens_data = tokens.mutable_data(0, 0); lightseq::cuda::CHECK_GPU_ERROR(cudaMemcpy(tokens_data, d_output_, sizeof(int) * tokens.size(), cudaMemcpyDeviceToHost)); return tokens; } }; } // namespace cuda } // namespace lightseq
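Both copies of this constructor follow the same initialization pattern; only the API prefixes differ between the CUDA original and the hipified file above. The fragment below is a stand-alone sketch of that pattern in CUDA terms, with the HIP names the hipified copy uses noted in comments. It is an illustration, not code taken from lightseq.

#include <cublas_v2.h>            // the HIP build includes the hipBLAS header instead
#include <cuda_runtime.h>
#include <stdexcept>

int main() {
    cudaStream_t stream = nullptr;                         // hipStream_t
    cublasHandle_t handle = nullptr;                       // hipblasHandle_t

    cudaError_t cuerr = cudaStreamCreate(&stream);         // hipStreamCreate
    if (cuerr != cudaSuccess)                              // hipSuccess
        throw std::runtime_error(cudaGetErrorString(cuerr));  // hipGetErrorString

    cublasStatus_t blaserr = cublasCreate(&handle);        // hipblasCreate
    if (blaserr != CUBLAS_STATUS_SUCCESS)                  // HIPBLAS_STATUS_SUCCESS
        throw std::runtime_error("Failed to create cublas handle");

    blaserr = cublasSetStream(handle, stream);             // hipblasSetStream
    if (blaserr != CUBLAS_STATUS_SUCCESS)
        throw std::runtime_error("Failed to set stream for cublas handle");

    cublasDestroy(handle);                                 // hipblasDestroy
    cudaStreamDestroy(stream);                             // hipStreamDestroy
    return 0;
}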
8bd19ae8eabdfe0adcbcb94267c2eea02ce48b40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include <string> #include "loadSaveImage.h" #include <thrust/extrema.h> //chroma-LogLuminance Space static float *d_x__; static float *d_y__; static float *d_logY__; //memory for the cdf static unsigned int *d_cdf__; static const int numBins = 1024; size_t numRows__; size_t numCols__; /* Copied from Mike's IPython notebook with some minor modifications * Mainly double precision constants to floats and log10 -> log10f * Also removed Luminance (Y) channel since it is never used eke*/ __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } /* Copied from Mike's IPython notebook * Modified just by having threads read the normalization constant directly from device memory instead of copying it back */ __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } /* Copied from Mike's IPython notebook * Modified double constants -> float * Perform tone mapping based upon new * luminance scaling */ __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
void preProcess(float** d_luminance, unsigned int** d_cdf, size_t *numRows, size_t *numCols, unsigned int *numberOfBins, const std::string &filename) { //make sure the context initializes ok checkCudaErrors(hipFree(0)); float *imgPtr; //we will become responsible for this pointer loadImageHDR(filename, &imgPtr, &numRows__, &numCols__); *numRows = numRows__; *numCols = numCols__; //first thing to do is split incoming BGR float data into separate channels size_t numPixels = numRows__ * numCols__; float *red = new float[numPixels]; float *green = new float[numPixels]; float *blue = new float[numPixels]; //Remeber image is loaded BGR for (size_t i = 0; i < numPixels; ++i) { blue[i] = imgPtr[3 * i + 0]; green[i] = imgPtr[3 * i + 1]; red[i] = imgPtr[3 * i + 2]; } delete[] imgPtr; //being good citizens are releasing resources //allocated in loadImageHDR float *d_red, *d_green, *d_blue; //RGB space size_t channelSize = sizeof(float) * numPixels; checkCudaErrors(hipMalloc(&d_red, channelSize)); checkCudaErrors(hipMalloc(&d_green, channelSize)); checkCudaErrors(hipMalloc(&d_blue, channelSize)); checkCudaErrors(hipMalloc(&d_x__, channelSize)); checkCudaErrors(hipMalloc(&d_y__, channelSize)); checkCudaErrors(hipMalloc(&d_logY__, channelSize)); checkCudaErrors(hipMemcpy(d_red, red, channelSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_green, green, channelSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_blue, blue, channelSize, hipMemcpyHostToDevice)); //convert from RGB space to chrominance/luminance space xyY const dim3 blockSize(32, 32, 1); const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x, (numRows__ + blockSize.y - 1) / blockSize.y, 1); hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue, d_x__, d_y__, d_logY__, .0001f, numRows__, numCols__); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); *d_luminance = d_logY__; //allocate memory for the cdf of the histogram *numberOfBins = numBins; checkCudaErrors(hipMalloc(&d_cdf__, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemset(d_cdf__, 0, sizeof(unsigned int) * numBins)); *d_cdf = d_cdf__; checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); delete[] red; delete[] green; delete[] blue; } void postProcess(const std::string& output_file, size_t numRows, size_t numCols, float min_log_Y, float max_log_Y) { const int numPixels = numRows__ * numCols__; const int numThreads = 192; float *d_cdf_normalized; checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins)); //first normalize the cdf to a maximum value of 1 //this is how we compress the range of the luminance channel hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads), dim3(numThreads), 0, 0, d_cdf__, d_cdf_normalized, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //allocate memory for the output RGB channels float *h_red, *h_green, *h_blue; float *d_red, *d_green, *d_blue; h_red = new float[numPixels]; h_green = new float[numPixels]; h_blue = new float[numPixels]; checkCudaErrors(hipMalloc(&d_red, sizeof(float) * numPixels)); checkCudaErrors(hipMalloc(&d_green, sizeof(float) * numPixels)); checkCudaErrors(hipMalloc(&d_blue, sizeof(float) * numPixels)); float log_Y_range = max_log_Y - min_log_Y; const dim3 blockSize(32, 32, 1); const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y ); //next perform the actual tone-mapping //we map each 
luminance value to its new value //and then transform back to RGB space hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x__, d_y__, d_logY__, d_cdf_normalized, d_red, d_green, d_blue, min_log_Y, max_log_Y, log_Y_range, numBins, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(h_red, d_red, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_green, d_green, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_blue, d_blue, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); //recombine the image channels float *imageHDR = new float[numPixels * 3]; for (int i = 0; i < numPixels; ++i) { imageHDR[3 * i + 0] = h_blue[i]; imageHDR[3 * i + 1] = h_green[i]; imageHDR[3 * i + 2] = h_red[i]; } saveImageHDR(imageHDR, numRows, numCols, output_file); delete[] imageHDR; delete[] h_red; delete[] h_green; delete[] h_blue; //cleanup checkCudaErrors(hipFree(d_cdf_normalized)); } void cleanupGlobalMemory(void) { checkCudaErrors(hipFree(d_x__)); checkCudaErrors(hipFree(d_y__)); checkCudaErrors(hipFree(d_logY__)); checkCudaErrors(hipFree(d_cdf__)); }
8bd19ae8eabdfe0adcbcb94267c2eea02ce48b40.cu
#include "utils.h" #include <string> #include "loadSaveImage.h" #include <thrust/extrema.h> //chroma-LogLuminance Space static float *d_x__; static float *d_y__; static float *d_logY__; //memory for the cdf static unsigned int *d_cdf__; static const int numBins = 1024; size_t numRows__; size_t numCols__; /* Copied from Mike's IPython notebook with some minor modifications * Mainly double precision constants to floats and log10 -> log10f * Also removed Luminance (Y) channel since it is never used eke*/ __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } /* Copied from Mike's IPython notebook * Modified just by having threads read the normalization constant directly from device memory instead of copying it back */ __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } /* Copied from Mike's IPython notebook * Modified double constants -> float * Perform tone mapping based upon new * luminance scaling */ __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
void preProcess(float** d_luminance, unsigned int** d_cdf, size_t *numRows, size_t *numCols, unsigned int *numberOfBins, const std::string &filename) { //make sure the context initializes ok checkCudaErrors(cudaFree(0)); float *imgPtr; //we will become responsible for this pointer loadImageHDR(filename, &imgPtr, &numRows__, &numCols__); *numRows = numRows__; *numCols = numCols__; //first thing to do is split incoming BGR float data into separate channels size_t numPixels = numRows__ * numCols__; float *red = new float[numPixels]; float *green = new float[numPixels]; float *blue = new float[numPixels]; //Remeber image is loaded BGR for (size_t i = 0; i < numPixels; ++i) { blue[i] = imgPtr[3 * i + 0]; green[i] = imgPtr[3 * i + 1]; red[i] = imgPtr[3 * i + 2]; } delete[] imgPtr; //being good citizens are releasing resources //allocated in loadImageHDR float *d_red, *d_green, *d_blue; //RGB space size_t channelSize = sizeof(float) * numPixels; checkCudaErrors(cudaMalloc(&d_red, channelSize)); checkCudaErrors(cudaMalloc(&d_green, channelSize)); checkCudaErrors(cudaMalloc(&d_blue, channelSize)); checkCudaErrors(cudaMalloc(&d_x__, channelSize)); checkCudaErrors(cudaMalloc(&d_y__, channelSize)); checkCudaErrors(cudaMalloc(&d_logY__, channelSize)); checkCudaErrors(cudaMemcpy(d_red, red, channelSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_green, green, channelSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_blue, blue, channelSize, cudaMemcpyHostToDevice)); //convert from RGB space to chrominance/luminance space xyY const dim3 blockSize(32, 32, 1); const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x, (numRows__ + blockSize.y - 1) / blockSize.y, 1); rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue, d_x__, d_y__, d_logY__, .0001f, numRows__, numCols__); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); *d_luminance = d_logY__; //allocate memory for the cdf of the histogram *numberOfBins = numBins; checkCudaErrors(cudaMalloc(&d_cdf__, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemset(d_cdf__, 0, sizeof(unsigned int) * numBins)); *d_cdf = d_cdf__; checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); delete[] red; delete[] green; delete[] blue; } void postProcess(const std::string& output_file, size_t numRows, size_t numCols, float min_log_Y, float max_log_Y) { const int numPixels = numRows__ * numCols__; const int numThreads = 192; float *d_cdf_normalized; checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins)); //first normalize the cdf to a maximum value of 1 //this is how we compress the range of the luminance channel normalize_cdf<<< (numBins + numThreads - 1) / numThreads, numThreads>>>(d_cdf__, d_cdf_normalized, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //allocate memory for the output RGB channels float *h_red, *h_green, *h_blue; float *d_red, *d_green, *d_blue; h_red = new float[numPixels]; h_green = new float[numPixels]; h_blue = new float[numPixels]; checkCudaErrors(cudaMalloc(&d_red, sizeof(float) * numPixels)); checkCudaErrors(cudaMalloc(&d_green, sizeof(float) * numPixels)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(float) * numPixels)); float log_Y_range = max_log_Y - min_log_Y; const dim3 blockSize(32, 32, 1); const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y ); //next perform the actual tone-mapping //we map each luminance value to its new value //and then 
transform back to RGB space tonemap<<<gridSize, blockSize>>>(d_x__, d_y__, d_logY__, d_cdf_normalized, d_red, d_green, d_blue, min_log_Y, max_log_Y, log_Y_range, numBins, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(h_red, d_red, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_green, d_green, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_blue, d_blue, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); //recombine the image channels float *imageHDR = new float[numPixels * 3]; for (int i = 0; i < numPixels; ++i) { imageHDR[3 * i + 0] = h_blue[i]; imageHDR[3 * i + 1] = h_green[i]; imageHDR[3 * i + 2] = h_red[i]; } saveImageHDR(imageHDR, numRows, numCols, output_file); delete[] imageHDR; delete[] h_red; delete[] h_green; delete[] h_blue; //cleanup checkCudaErrors(cudaFree(d_cdf_normalized)); } void cleanupGlobalMemory(void) { checkCudaErrors(cudaFree(d_x__)); checkCudaErrors(cudaFree(d_y__)); checkCudaErrors(cudaFree(d_logY__)); checkCudaErrors(cudaFree(d_cdf__)); }
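A small host-side sketch, added for illustration, of the two steps normalize_cdf and tonemap perform on the GPU above: the cumulative histogram is scaled by its last entry, and each log-luminance value is mapped to the normalized CDF value of its bin. The bin count and sample values below are made up, not taken from the code above.

#include <cstdio>

int main() {
    const int num_bins = 8;                                         // 1024 in the real code
    unsigned int cdf[num_bins] = {3, 10, 25, 40, 70, 90, 95, 100};  // made-up cumulative histogram

    // Same scaling as normalize_cdf: divide by the last cumulative count.
    float cdf_norm[num_bins];
    float normalization_constant = 1.f / cdf[num_bins - 1];
    for (int i = 0; i < num_bins; ++i)
        cdf_norm[i] = cdf[i] * normalization_constant;

    // Same bin lookup as the tonemap kernel, for one example pixel.
    float min_log_Y = -3.f, max_log_Y = 1.f;                        // would come from the min/max reduction
    float log_Y_range = max_log_Y - min_log_Y;
    float log_Y = -0.5f;                                            // example pixel log-luminance

    int bin_index = (int)((num_bins * (log_Y - min_log_Y)) / log_Y_range);
    if (bin_index > num_bins - 1) bin_index = num_bins - 1;         // same clamp as the kernel's min()
    printf("log_Y %.2f -> bin %d -> new Y %.3f\n", log_Y, bin_index, cdf_norm[bin_index]);
    return 0;
}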
3a221f062ef593d863d0443f9b0a306f42f6b1d4.hip
// !!! This is a file automatically generated by hipify!!! #include <gtc/matrix_access.hpp> #include "CudaRender.cuh" namespace render { namespace cuda { __ATTRIBS__ static inline glm::vec4 convert_to_vec4(const glm::vec3& input) { return glm::vec4(input.x, input.y, input.z, 1.0); } __ATTRIBS__ static inline glm::vec3 covert_to_vec3(const glm::vec4& input) { return glm::vec3(input.x, input.y, input.z); } Camera::Camera(Point position, Point look_at, float field_of_view, int width, int height) : _position(std::move(position)) , _look_at(look_at) , _field_of_view(field_of_view) , _aspect_ratio(float(width) / float(height)) , _width(width) , _height(height) {} __ATTRIBS__ Ray Camera::emit_ray(int height_pos, int width_pos) const { Vector direction = Vector(width_pos - _width / 2.0, (_height / 2.0 - height_pos), (_height / 2.0) / tanf(_field_of_view / 2.0)) * cam_to_world(); return Ray(_position, glm::normalize(direction)); } __ATTRIBS__ void Camera::move_forward() { _position.z = _position.z - _speed; } __ATTRIBS__ void Camera::move_backward() { _position.z = _position.z + _speed; } __ATTRIBS__ void Camera::move_right() { _position.x = _position.x + _speed; } __ATTRIBS__ void Camera::move_left() { _position.x = _position.x - _speed; } __ATTRIBS__ void Camera::move_up() { _position.y = _position.y + _speed; } __ATTRIBS__ void Camera::move_down() { _position.y = _position.y - _speed; } __ATTRIBS__ void Camera::update_position(const Point& point) { _position = point; } __ATTRIBS__ const Point& Camera::position() const { return _position; } __ATTRIBS__ int Camera::width() const { return _width; } __ATTRIBS__ int Camera::height() const { return _height; } __ATTRIBS__ glm::mat3x3 Camera::cam_to_world() const { const Vector _up(0.0, 1.0, 0.0); const float EPSILON = 0.0000001; Vector forward = glm::normalize(_look_at - _position); Vector right = (::fabs(::fabs(glm::dot(_up, forward)) - 1.0f) > EPSILON) ? -glm::cross(glm::normalize(_up), forward) : Vector(1.0, 0.0, 0.0); Vector up = glm::normalize(glm::cross(forward, right)); return glm::mat3x3 { {right.x, up.x, forward.x}, {right.y, up.y, forward.y}, {right.z, up.z, forward.z}, }; } __ATTRIBS__ void Camera::dump() const { printf("<Camera: location - (%.4f;%.4f;%.4f), WxH - %dx%d, " "aspect ratio - %.3f, field of view - %.3f, " "look at - (%.4f;%.4f%.4f)>\n", _position.x, _position.y, _position.z, _width, _height, _aspect_ratio, _field_of_view, _look_at.x, _look_at.y, _look_at.z); } }}
3a221f062ef593d863d0443f9b0a306f42f6b1d4.cu
#include <gtc/matrix_access.hpp> #include "CudaRender.cuh" namespace render { namespace cuda { __ATTRIBS__ static inline glm::vec4 convert_to_vec4(const glm::vec3& input) { return glm::vec4(input.x, input.y, input.z, 1.0); } __ATTRIBS__ static inline glm::vec3 covert_to_vec3(const glm::vec4& input) { return glm::vec3(input.x, input.y, input.z); } Camera::Camera(Point position, Point look_at, float field_of_view, int width, int height) : _position(std::move(position)) , _look_at(look_at) , _field_of_view(field_of_view) , _aspect_ratio(float(width) / float(height)) , _width(width) , _height(height) {} __ATTRIBS__ Ray Camera::emit_ray(int height_pos, int width_pos) const { Vector direction = Vector(width_pos - _width / 2.0, (_height / 2.0 - height_pos), (_height / 2.0) / tanf(_field_of_view / 2.0)) * cam_to_world(); return Ray(_position, glm::normalize(direction)); } __ATTRIBS__ void Camera::move_forward() { _position.z = _position.z - _speed; } __ATTRIBS__ void Camera::move_backward() { _position.z = _position.z + _speed; } __ATTRIBS__ void Camera::move_right() { _position.x = _position.x + _speed; } __ATTRIBS__ void Camera::move_left() { _position.x = _position.x - _speed; } __ATTRIBS__ void Camera::move_up() { _position.y = _position.y + _speed; } __ATTRIBS__ void Camera::move_down() { _position.y = _position.y - _speed; } __ATTRIBS__ void Camera::update_position(const Point& point) { _position = point; } __ATTRIBS__ const Point& Camera::position() const { return _position; } __ATTRIBS__ int Camera::width() const { return _width; } __ATTRIBS__ int Camera::height() const { return _height; } __ATTRIBS__ glm::mat3x3 Camera::cam_to_world() const { const Vector _up(0.0, 1.0, 0.0); const float EPSILON = 0.0000001; Vector forward = glm::normalize(_look_at - _position); Vector right = (std::fabs(std::fabs(glm::dot(_up, forward)) - 1.0f) > EPSILON) ? -glm::cross(glm::normalize(_up), forward) : Vector(1.0, 0.0, 0.0); Vector up = glm::normalize(glm::cross(forward, right)); return glm::mat3x3 { {right.x, up.x, forward.x}, {right.y, up.y, forward.y}, {right.z, up.z, forward.z}, }; } __ATTRIBS__ void Camera::dump() const { printf("<Camera: location - (%.4f;%.4f;%.4f), WxH - %dx%d, " "aspect ratio - %.3f, field of view - %.3f, " "look at - (%.4f;%.4f%.4f)>\n", _position.x, _position.y, _position.z, _width, _height, _aspect_ratio, _field_of_view, _look_at.x, _look_at.y, _look_at.z); } }}
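Camera::emit_ray above builds a direction from the pixel offset and a focal length fixed by the vertical field of view, then rotates it by cam_to_world. The sketch below reproduces only the pre-rotation part of that mapping, without glm and with assumed image dimensions, to show how the centre pixel looks straight ahead while corners spread outward; it is an illustration, not part of the renderer.

#include <cmath>
#include <cstdio>

int main() {
    const int width = 640, height = 480;             // assumed image size
    const float fov = 1.0472f;                       // ~60 degrees, in radians as in the class
    const float focal = (height / 2.0f) / std::tan(fov / 2.0f);

    int pixels[2][2] = {{width / 2, height / 2}, {0, 0}};   // centre pixel and top-left corner
    for (auto& p : pixels) {
        float dx = p[0] - width / 2.0f;              // same expressions as emit_ray, before the
        float dy = height / 2.0f - p[1];             // cam_to_world rotation is applied
        float dz = focal;
        float len = std::sqrt(dx * dx + dy * dy + dz * dz);
        printf("pixel (%4d,%4d) -> dir (%.3f, %.3f, %.3f)\n",
               p[0], p[1], dx / len, dy / len, dz / len);
    }
    return 0;
}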
e520e50c3343c924e2a5a77499f9fa8470df6296.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "knapsackKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int *profits = NULL;
            hipMalloc(&profits, XSIZE*YSIZE);
            int *weights = NULL;
            hipMalloc(&weights, XSIZE*YSIZE);
            int *f = NULL;
            hipMalloc(&f, XSIZE*YSIZE);
            int capacity = 1;
            int i = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( knapsackKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, profits,weights,f,capacity,i);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( knapsackKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, profits,weights,f,capacity,i);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( knapsackKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, profits,weights,f,capacity,i);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
e520e50c3343c924e2a5a77499f9fa8470df6296.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "knapsackKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int *profits = NULL;
            cudaMalloc(&profits, XSIZE*YSIZE);
            int *weights = NULL;
            cudaMalloc(&weights, XSIZE*YSIZE);
            int *f = NULL;
            cudaMalloc(&f, XSIZE*YSIZE);
            int capacity = 1;
            int i = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            knapsackKernel<<<gridBlock,threadBlock>>>(profits,weights,f,capacity,i);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                knapsackKernel<<<gridBlock,threadBlock>>>(profits,weights,f,capacity,i);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                knapsackKernel<<<gridBlock,threadBlock>>>(profits,weights,f,capacity,i);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
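One caveat about the timing loop above: the 1000 launches are enqueued asynchronously and steady_clock::now() is read without a final device synchronization, so the reported interval is dominated by launch/enqueue overhead rather than completed kernel execution. The sketch below shows the same harness with synchronization around the timed region; the kernel and sizes are placeholders, not the real knapsackKernel.

#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float* data) {            // hypothetical stand-in for knapsackKernel
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    data[i] = data[i] * 2.0f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));

    dummyKernel<<<n / 256, 256>>>(d_data);             // warm-up launch
    cudaDeviceSynchronize();

    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < 1000; ++i)
        dummyKernel<<<n / 256, 256>>>(d_data);
    cudaDeviceSynchronize();                           // wait for all 1000 kernels before stopping the clock
    auto end = std::chrono::steady_clock::now();

    float usecs = std::chrono::duration<float, std::micro>(end - start).count();
    printf("avg kernel time: %.2f us\n", usecs / 1000.0f);

    cudaFree(d_data);
    return 0;
}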
76be060a409bf4b605c2d5deec9df574e1e2ac6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * calcTurbulentViscosity.cu * * Created on: 23-07-2015 * Author: Kamil Szewc * */ #include "../../../sph.h" #include "../../../hlp.h" __global__ void calcTurbulentViscosity(Particle *p, Parameters *par) { unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; while (tid < par->N) { if (p[tid].phaseType == 0) { p[tid].nut = pow2(0.12f * par->DR) * p[tid].str; } tid += blockDim.x * gridDim.x; } }
76be060a409bf4b605c2d5deec9df574e1e2ac6e.cu
/*
 * calcTurbulentViscosity.cu
 *
 * Created on: 23-07-2015
 * Author: Kamil Szewc
 *
 */
#include "../../../sph.h"
#include "../../../hlp.h"

__global__ void calcTurbulentViscosity(Particle *p, Parameters *par)
{
    unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
    while (tid < par->N) {
        if (p[tid].phaseType == 0) {
            p[tid].nut = pow2(0.12f * par->DR) * p[tid].str;
        }
        tid += blockDim.x * gridDim.x;
    }
}
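calcTurbulentViscosity uses a grid-stride loop, so the same kernel works for any grid size: each thread starts at its global index and advances by blockDim.x * gridDim.x until it passes par->N. A self-contained sketch of that pattern follows; the kernel and sizes below are placeholders, not part of the SPH code.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scaleGridStride(float* x, float s, int n) {
    // Same traversal as the while-loop above: stride by the total thread count.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        x[i] *= s;
}

int main() {
    const int n = 1000000;
    float* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    scaleGridStride<<<64, 256>>>(d_x, 2.0f, n);   // 16384 threads cover 1e6 elements via striding
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_x);
    return 0;
}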
705fec46e626b156e45e77075bbfced434403ab2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialAdaptiveAveragePooling.cu" #else #include <THHUNN/common.h> // 4d tensor B x D x H x W void THNN_(SpatialAdaptiveAveragePooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int osizeW, int osizeH) { THCUNN_assertSameGPU(state, 2, input, output); scalar_t *output_data; scalar_t *input_data; THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s"); if (input->dim() == 3) { int64_t sizeD = input->size(0); int64_t isizeH = input->size(1); int64_t isizeW = input->size(2); int64_t istrideD = input->stride(0); int64_t istrideH = input->stride(1); int64_t istrideW = input->stride(2); input_data = THCTensor_(data)(state, input); THCTensor_(resize3d)(state, output, sizeD, osizeH, osizeW); output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int blocksH = max((int)(16L / sizeD), 1); dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run averagepool kernel hipLaunchKernelGGL(( adaptiveaveragepool) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); THCudaCheck(hipGetLastError()); } else { input = THCTensor_(newContiguous)(state, input); int64_t sizeB = input->size(0); int64_t sizeD = input->size(1); int64_t isizeH = input->size(2); int64_t isizeW = input->size(3); int64_t istrideD = input->stride(1); int64_t istrideH = input->stride(2); int64_t istrideW = input->stride(3); input_data = THCTensor_(data)(state, input); THCTensor_(resize4d)(state, output, sizeB, sizeD, osizeH, osizeW); output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int blocksH = max((int)(16L / sizeD), 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); // run averagepool kernel hipLaunchKernelGGL(( adaptiveaveragepool) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); THCudaCheck(hipGetLastError()); // clean THCTensor_(free)(state, input); } } void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput) { bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); scalar_t *gradInput_data; scalar_t *gradOutput_data; gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (input->dim() == 3) { int64_t sizeD = input->size(0); int64_t isizeH = input->size(1); int64_t isizeW = input->size(2); int64_t osizeH = gradOutput->size(1); int64_t osizeW = gradOutput->size(2); //bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); gradOutput_data = THCTensor_(data)(state, gradOutput); gradInput_data = THCTensor_(data)(state, gradInput); // cuda blocks & threads: int blocksH = max((int)(16L / sizeD), 1); dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptiveaveragegradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel 
hipLaunchKernelGGL(( adaptiveaveragegradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } THCudaCheck(hipGetLastError()); } else { int64_t sizeB = input->size(0); int64_t sizeD = input->size(1); int64_t isizeH = input->size(2); int64_t isizeW = input->size(3); int64_t osizeH = gradOutput->size(2); int64_t osizeW = gradOutput->size(3); //bool atomic = //(isizeW%osizeW != 0) || (isizeH%osizeH != 0); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); gradOutput_data = THCTensor_(data)(state, gradOutput); gradInput_data = THCTensor_(data)(state, gradInput); // cuda blocks & threads: int blocksH = max((int)(16L / sizeD), 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptiveaveragegradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( adaptiveaveragegradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } THCudaCheck(hipGetLastError()); } // clean THCTensor_(free)(state,gradOutput); } #endif
705fec46e626b156e45e77075bbfced434403ab2.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialAdaptiveAveragePooling.cu" #else #include <THCUNN/common.h> // 4d tensor B x D x H x W void THNN_(SpatialAdaptiveAveragePooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int osizeW, int osizeH) { THCUNN_assertSameGPU(state, 2, input, output); scalar_t *output_data; scalar_t *input_data; THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s"); if (input->dim() == 3) { int64_t sizeD = input->size(0); int64_t isizeH = input->size(1); int64_t isizeW = input->size(2); int64_t istrideD = input->stride(0); int64_t istrideH = input->stride(1); int64_t istrideW = input->stride(2); input_data = THCTensor_(data)(state, input); THCTensor_(resize3d)(state, output, sizeD, osizeH, osizeW); output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int blocksH = max((int)(16L / sizeD), 1); dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run averagepool kernel adaptiveaveragepool <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); THCudaCheck(cudaGetLastError()); } else { input = THCTensor_(newContiguous)(state, input); int64_t sizeB = input->size(0); int64_t sizeD = input->size(1); int64_t isizeH = input->size(2); int64_t isizeW = input->size(3); int64_t istrideD = input->stride(1); int64_t istrideH = input->stride(2); int64_t istrideW = input->stride(3); input_data = THCTensor_(data)(state, input); THCTensor_(resize4d)(state, output, sizeB, sizeD, osizeH, osizeW); output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int blocksH = max((int)(16L / sizeD), 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); // run averagepool kernel adaptiveaveragepool <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); THCudaCheck(cudaGetLastError()); // clean THCTensor_(free)(state, input); } } void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput) { bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); scalar_t *gradInput_data; scalar_t *gradOutput_data; gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (input->dim() == 3) { int64_t sizeD = input->size(0); int64_t isizeH = input->size(1); int64_t isizeW = input->size(2); int64_t osizeH = gradOutput->size(1); int64_t osizeW = gradOutput->size(2); //bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); gradOutput_data = THCTensor_(data)(state, gradOutput); gradInput_data = THCTensor_(data)(state, gradInput); // cuda blocks & threads: int blocksH = max((int)(16L / sizeD), 1); dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptiveaveragegradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel adaptiveaveragegradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } 
THCudaCheck(cudaGetLastError()); } else { int64_t sizeB = input->size(0); int64_t sizeD = input->size(1); int64_t isizeH = input->size(2); int64_t isizeW = input->size(3); int64_t osizeH = gradOutput->size(2); int64_t osizeW = gradOutput->size(3); //bool atomic = //(isizeW%osizeW != 0) || (isizeH%osizeH != 0); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); gradOutput_data = THCTensor_(data)(state, gradOutput); gradInput_data = THCTensor_(data)(state, gradInput); // cuda blocks & threads: int blocksH = max((int)(16L / sizeD), 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptiveaveragegradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel, accumulate gradients atomically adaptiveaveragegradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } THCudaCheck(cudaGetLastError()); } // clean THCTensor_(free)(state,gradOutput); } #endif
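Both branches of the updateOutput path above size the launch the same way: a fixed 32x8 thread block, one grid column per plane (sizeD, or sizeB*sizeD in batch mode), and a second grid dimension blocksH that only exceeds 1 when there are fewer than 16 planes. The host-only sketch below simply replays that arithmetic for a few plane counts; print_launch_geometry is a hypothetical helper name, not part of THCUNN.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring the launch-geometry arithmetic above:
// one grid column per plane, a second grid dimension that only grows when
// there are fewer than 16 planes, and a fixed 32x8 thread block.
static void print_launch_geometry(int64_t planes /* sizeD, or sizeB*sizeD */)
{
    int blocksH = std::max((int)(16L / planes), 1);
    printf("planes=%lld -> grid=(%lld, %d), block=(32, 8)\n",
           (long long)planes, (long long)planes, blocksH);
}

int main()
{
    print_launch_geometry(3);      // few feature maps: 5 blocks along the second grid dimension
    print_launch_geometry(64);     // typical channel count: blocksH collapses to 1
    print_launch_geometry(8 * 64); // batch mode: sizeB * sizeD planes
    return 0;
}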
b37d42ee785a3556ba9dbd80f5fcd789d5474c49.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> static const int NTHREADS = 32; __global__ void cunn_ClassNLLCriterion_updateOutput_kernel1(hipLaunchParm lp, float *output, float *total_weight, float *input, long *target, float *weights, int size_average, int n_classes) { #if defined(__HIP_PLATFORM_NVCC__) assert(hipThreadIdx_x == 0 && hipThreadIdx_y == 0 && hipThreadIdx_z == 0); #endif // TODO: T4951791 Reuse code between updateOutput_kernel1 and // updateOutput_kernel. int t = (int)*target - TH_INDEX_BASE; #if defined(__HIP_PLATFORM_NVCC__) assert(t >= 0 && t < n_classes); #endif float cur_weight = weights ? weights[t] : 1.0f; *output = -cur_weight * input[t]; *total_weight = cur_weight; if (size_average && *total_weight > 0) { *output /= *total_weight; } } __global__ void cunn_ClassNLLCriterion_updateOutput_kernel(hipLaunchParm lp, float *output, float *total_weight, float *input, long *target, float *weights, int size_average, int nframe, int ndim, int n_classes) { __shared__ float shInputs[NTHREADS], acc_weight[NTHREADS]; int i, t; float cur_weight; shInputs[hipThreadIdx_x] = 0.0f; acc_weight[hipThreadIdx_x] = 0.0f; for (i = hipThreadIdx_x; i < nframe; i += NTHREADS) { t = target[i] - TH_INDEX_BASE; #if defined(__HIP_PLATFORM_NVCC__) assert(t >= 0 && t < n_classes); #endif cur_weight = weights ? weights[t] : 1.0f; shInputs[hipThreadIdx_x] -= input[i * ndim + t] * cur_weight; acc_weight[hipThreadIdx_x] += cur_weight; } __syncthreads(); // TODO: T4951791 Reuse code between updateOutput_kernel1 and // updateOutput_kernel if (hipThreadIdx_x == 0) { *output = *total_weight = 0; for (i = 0; i < NTHREADS; ++i){ *output += shInputs[i]; *total_weight += acc_weight[i]; } if (size_average && *total_weight > 0) { *output /= *total_weight; } } } __global__ void cunn_ClassNLLCriterion_updateGradInput_kernel1(hipLaunchParm lp, float* gradInput, float* weights, long* target, float* total_weight, int size_average, int n_classes) { if (*total_weight <= 0) { return; } float norm = size_average ? (1.0f / *total_weight) : 1.0f; int t = (int)*target - TH_INDEX_BASE; #if defined(__HIP_PLATFORM_NVCC__) assert(t >= 0 && t < n_classes); #endif gradInput[t] = -(weights ? weights[t] : 1.0f) * norm; } __global__ void cunn_ClassNLLCriterion_updateGradInput_kernel(hipLaunchParm lp, float *gradInput, long *target, float *weights, float *total_weight, int size_average, int nframe, int ndim, int n_classes) { if (*total_weight <= 0) { return; } int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; for (i = hipThreadIdx_x; i < nframe; i += NTHREADS) { t = (int)target[i] - TH_INDEX_BASE; #if defined(__HIP_PLATFORM_NVCC__) assert(t >= 0 && t < n_classes); #endif gradInput[i * ndim + t] = -(weights ? 
weights[t] : 1.0f) * norm; } } void THNN_CudaClassNLLCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { if (THCudaLongTensor_nDimension(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCudaTensor_nDimension(state, input); int n_classes = THCudaTensor_size(state, input, n_dims - 1); if (weights) { THCUNN_assertSameGPU( state, 5, input, target, weights, output, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, output, total_weight ); } if (THCudaTensor_nDimension(state, input) > 2) { THArgCheck(0, 2, "vector or matrix expected"); } if (weights && THCudaTensor_nElement(state, weights) != n_classes) { THError("weight tensor should be defined either for all or no classes"); } input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; long *target_data = THCudaLongTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); if (THCudaTensor_nDimension(state, input) == 1) { hipLaunchKernel(HIP_KERNEL_NAME(cunn_ClassNLLCriterion_updateOutput_kernel1), dim3(1), dim3(1), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, n_classes ); } else if (THCudaTensor_nDimension(state, input) == 2) { hipLaunchKernel(HIP_KERNEL_NAME(cunn_ClassNLLCriterion_updateOutput_kernel), dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), n_classes ); } THCudaCheck(hipGetLastError()); if (weights) { THCudaTensor_free(state, weights); } THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaClassNLLCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { if (THCudaLongTensor_nDimension(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCudaTensor_nDimension(state, input); int n_classes = THCudaTensor_size(state, input, n_dims - 1); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights) { THCUNN_assertSameGPU( state, 5, weights, input, target, gradInput, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, gradInput, total_weight ); } if (THCudaTensor_nDimension(state, input) > 2) { THArgCheck(0, 2, "vector or matrix expected"); } if (weights && THCudaTensor_nElement(state, weights) != n_classes) { THError("weight tensor should be defined either for all or no classes"); } weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *weights_data = weights ? 
THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); long *target_data = THCudaLongTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); if (THCudaTensor_nDimension(state, input) == 1) { hipLaunchKernel(HIP_KERNEL_NAME(cunn_ClassNLLCriterion_updateGradInput_kernel1), dim3(1), dim3(1), 0, THCState_getCurrentStream(state), gradInput_data, weights_data, target_data, total_weight_data, sizeAverage, n_classes ); } else { hipLaunchKernel(HIP_KERNEL_NAME(cunn_ClassNLLCriterion_updateGradInput_kernel), dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state), gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), n_classes ); } THCudaCheck(hipGetLastError()); if (weights) { THCudaTensor_free(state, weights); } THCudaLongTensor_free(state, target); }
b37d42ee785a3556ba9dbd80f5fcd789d5474c49.cu
#include "hip/hip_runtime.h" #include "THCUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> static const int NTHREADS = 32; __global__ void cunn_ClassNLLCriterion_updateOutput_kernel1(hipLaunchParm lp, float *output, float *total_weight, float *input, long *target, float *weights, int size_average, int n_classes) { #if defined(__HIP_PLATFORM_NVCC__) assert(hipThreadIdx_x == 0 && hipThreadIdx_y == 0 && hipThreadIdx_z == 0); #endif // TODO: T4951791 Reuse code between updateOutput_kernel1 and // updateOutput_kernel. int t = (int)*target - TH_INDEX_BASE; #if defined(__HIP_PLATFORM_NVCC__) assert(t >= 0 && t < n_classes); #endif float cur_weight = weights ? weights[t] : 1.0f; *output = -cur_weight * input[t]; *total_weight = cur_weight; if (size_average && *total_weight > 0) { *output /= *total_weight; } } __global__ void cunn_ClassNLLCriterion_updateOutput_kernel(hipLaunchParm lp, float *output, float *total_weight, float *input, long *target, float *weights, int size_average, int nframe, int ndim, int n_classes) { __shared__ float shInputs[NTHREADS], acc_weight[NTHREADS]; int i, t; float cur_weight; shInputs[hipThreadIdx_x] = 0.0f; acc_weight[hipThreadIdx_x] = 0.0f; for (i = hipThreadIdx_x; i < nframe; i += NTHREADS) { t = target[i] - TH_INDEX_BASE; #if defined(__HIP_PLATFORM_NVCC__) assert(t >= 0 && t < n_classes); #endif cur_weight = weights ? weights[t] : 1.0f; shInputs[hipThreadIdx_x] -= input[i * ndim + t] * cur_weight; acc_weight[hipThreadIdx_x] += cur_weight; } __syncthreads(); // TODO: T4951791 Reuse code between updateOutput_kernel1 and // updateOutput_kernel if (hipThreadIdx_x == 0) { *output = *total_weight = 0; for (i = 0; i < NTHREADS; ++i){ *output += shInputs[i]; *total_weight += acc_weight[i]; } if (size_average && *total_weight > 0) { *output /= *total_weight; } } } __global__ void cunn_ClassNLLCriterion_updateGradInput_kernel1(hipLaunchParm lp, float* gradInput, float* weights, long* target, float* total_weight, int size_average, int n_classes) { if (*total_weight <= 0) { return; } float norm = size_average ? (1.0f / *total_weight) : 1.0f; int t = (int)*target - TH_INDEX_BASE; #if defined(__HIP_PLATFORM_NVCC__) assert(t >= 0 && t < n_classes); #endif gradInput[t] = -(weights ? weights[t] : 1.0f) * norm; } __global__ void cunn_ClassNLLCriterion_updateGradInput_kernel(hipLaunchParm lp, float *gradInput, long *target, float *weights, float *total_weight, int size_average, int nframe, int ndim, int n_classes) { if (*total_weight <= 0) { return; } int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; for (i = hipThreadIdx_x; i < nframe; i += NTHREADS) { t = (int)target[i] - TH_INDEX_BASE; #if defined(__HIP_PLATFORM_NVCC__) assert(t >= 0 && t < n_classes); #endif gradInput[i * ndim + t] = -(weights ? 
weights[t] : 1.0f) * norm; } } void THNN_CudaClassNLLCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { if (THCudaLongTensor_nDimension(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCudaTensor_nDimension(state, input); int n_classes = THCudaTensor_size(state, input, n_dims - 1); if (weights) { THCUNN_assertSameGPU( state, 5, input, target, weights, output, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, output, total_weight ); } if (THCudaTensor_nDimension(state, input) > 2) { THArgCheck(0, 2, "vector or matrix expected"); } if (weights && THCudaTensor_nElement(state, weights) != n_classes) { THError("weight tensor should be defined either for all or no classes"); } input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; long *target_data = THCudaLongTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); if (THCudaTensor_nDimension(state, input) == 1) { hipLaunchKernel(HIP_KERNEL_NAME(cunn_ClassNLLCriterion_updateOutput_kernel1), dim3(1), dim3(1), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, n_classes ); } else if (THCudaTensor_nDimension(state, input) == 2) { hipLaunchKernel(HIP_KERNEL_NAME(cunn_ClassNLLCriterion_updateOutput_kernel), dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), n_classes ); } THCudaCheck(hipGetLastError()); if (weights) { THCudaTensor_free(state, weights); } THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaClassNLLCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { if (THCudaLongTensor_nDimension(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCudaTensor_nDimension(state, input); int n_classes = THCudaTensor_size(state, input, n_dims - 1); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights) { THCUNN_assertSameGPU( state, 5, weights, input, target, gradInput, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, gradInput, total_weight ); } if (THCudaTensor_nDimension(state, input) > 2) { THArgCheck(0, 2, "vector or matrix expected"); } if (weights && THCudaTensor_nElement(state, weights) != n_classes) { THError("weight tensor should be defined either for all or no classes"); } weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *weights_data = weights ? 
THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); long *target_data = THCudaLongTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); if (THCudaTensor_nDimension(state, input) == 1) { hipLaunchKernel(HIP_KERNEL_NAME(cunn_ClassNLLCriterion_updateGradInput_kernel1), dim3(1), dim3(1), 0, THCState_getCurrentStream(state), gradInput_data, weights_data, target_data, total_weight_data, sizeAverage, n_classes ); } else { hipLaunchKernel(HIP_KERNEL_NAME(cunn_ClassNLLCriterion_updateGradInput_kernel), dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state), gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), n_classes ); } THCudaCheck(hipGetLastError()); if (weights) { THCudaTensor_free(state, weights); } THCudaLongTensor_free(state, target); }
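The two-dimensional updateOutput kernel above is launched as a single block of NTHREADS threads: each thread strides over the minibatch, accumulating a weighted partial sum in shared memory, and thread 0 then folds the NTHREADS partials into the final scalar. The standalone sketch below illustrates that same single-block accumulation pattern on plain float data; strided_sum_kernel is a made-up name and is not part of THCUNN.

#include <cstdio>
#include <cuda_runtime.h>

static const int NTHREADS = 32;

// Each thread accumulates a strided partial sum in shared memory, then
// thread 0 combines the NTHREADS partials into a single result.
__global__ void strided_sum_kernel(const float* data, int n, float* result)
{
    __shared__ float partial[NTHREADS];
    partial[threadIdx.x] = 0.0f;
    for (int i = threadIdx.x; i < n; i += NTHREADS) {
        partial[threadIdx.x] += data[i];
    }
    __syncthreads();
    if (threadIdx.x == 0) {
        float sum = 0.0f;
        for (int i = 0; i < NTHREADS; ++i) sum += partial[i];
        *result = sum;
    }
}

int main()
{
    const int n = 1000;
    float h_data[n];
    for (int i = 0; i < n; ++i) h_data[i] = 1.0f;
    float *d_data, *d_result, h_result = 0.0f;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMalloc(&d_result, sizeof(float));
    cudaMemcpy(d_data, h_data, n * sizeof(float), cudaMemcpyHostToDevice);
    strided_sum_kernel<<<1, NTHREADS>>>(d_data, n, d_result);
    cudaMemcpy(&h_result, d_result, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %d)\n", h_result, n);
    cudaFree(d_data);
    cudaFree(d_result);
    return 0;
}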
c6db9d64af24505e075aad47b43b1e5e9bb8f062.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <arbor/fvm_types.hpp> #include "backends/threshold_crossing.hpp" #include "cuda_common.hpp" #include "stack_cu.hpp" namespace arb { namespace gpu { namespace kernel { template <typename T> __device__ inline T lerp(T a, T b, T u) { return std::fma(u, b, std::fma(-u, a, a)); } /// kernel used to test for threshold crossing test code. /// params: /// t : current time (ms) /// t_prev : time of last test (ms) /// size : number of values to test /// is_crossed : crossing state at time t_prev (true or false) /// prev_values : values at sample points (see index) sampled at t_prev /// index : index with locations in values to test for crossing /// values : values at t_prev /// thresholds : threshold values to watch for crossings __global__ void test_thresholds_impl( int size, const fvm_index_type* cv_to_cell, const fvm_value_type* t_after, const fvm_value_type* t_before, stack_storage<threshold_crossing>& stack, fvm_index_type* is_crossed, fvm_value_type* prev_values, const fvm_index_type* cv_index, const fvm_value_type* values, const fvm_value_type* thresholds) { int i = threadIdx.x + blockIdx.x*blockDim.x; bool crossed = false; float crossing_time; if (i<size) { // Test for threshold crossing const auto cv = cv_index[i]; const auto cell = cv_to_cell[cv]; const auto v_prev = prev_values[i]; const auto v = values[cv]; const auto thresh = thresholds[i]; if (!is_crossed[i]) { if (v>=thresh) { // The threshold has been passed, so estimate the time using // linear interpolation auto pos = (thresh - v_prev)/(v - v_prev); crossing_time = lerp(t_before[cell], t_after[cell], pos); is_crossed[i] = 1; crossed = true; } } else if (v<thresh) { is_crossed[i]=0; } prev_values[i] = v; } if (crossed) { push_back(stack, {fvm_size_type(i), crossing_time}); } } __global__ extern void reset_crossed_impl( int size, fvm_index_type* is_crossed, const fvm_index_type* cv_index, const fvm_value_type* values, const fvm_value_type* thresholds) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i<size) { is_crossed[i] = values[cv_index[i]] >= thresholds[i]; } } } // namespace kernel void test_thresholds_impl( int size, const fvm_index_type* cv_to_cell, const fvm_value_type* t_after, const fvm_value_type* t_before, stack_storage<threshold_crossing>& stack, fvm_index_type* is_crossed, fvm_value_type* prev_values, const fvm_index_type* cv_index, const fvm_value_type* values, const fvm_value_type* thresholds) { if (size>0) { constexpr int block_dim = 128; const int grid_dim = impl::block_count(size, block_dim); hipLaunchKernelGGL(( kernel::test_thresholds_impl), dim3(grid_dim), dim3(block_dim), 0, 0, size, cv_to_cell, t_after, t_before, stack, is_crossed, prev_values, cv_index, values, thresholds); } } void reset_crossed_impl( int size, fvm_index_type* is_crossed, const fvm_index_type* cv_index, const fvm_value_type* values, const fvm_value_type* thresholds) { if (size>0) { constexpr int block_dim = 128; const int grid_dim = impl::block_count(size, block_dim); hipLaunchKernelGGL(( kernel::reset_crossed_impl), dim3(grid_dim), dim3(block_dim), 0, 0, size, is_crossed, cv_index, values, thresholds); } } } // namespace gpu } // namespace arb
c6db9d64af24505e075aad47b43b1e5e9bb8f062.cu
#include <cmath> #include <arbor/fvm_types.hpp> #include "backends/threshold_crossing.hpp" #include "cuda_common.hpp" #include "stack_cu.hpp" namespace arb { namespace gpu { namespace kernel { template <typename T> __device__ inline T lerp(T a, T b, T u) { return std::fma(u, b, std::fma(-u, a, a)); } /// kernel used to test for threshold crossing test code. /// params: /// t : current time (ms) /// t_prev : time of last test (ms) /// size : number of values to test /// is_crossed : crossing state at time t_prev (true or false) /// prev_values : values at sample points (see index) sampled at t_prev /// index : index with locations in values to test for crossing /// values : values at t_prev /// thresholds : threshold values to watch for crossings __global__ void test_thresholds_impl( int size, const fvm_index_type* cv_to_cell, const fvm_value_type* t_after, const fvm_value_type* t_before, stack_storage<threshold_crossing>& stack, fvm_index_type* is_crossed, fvm_value_type* prev_values, const fvm_index_type* cv_index, const fvm_value_type* values, const fvm_value_type* thresholds) { int i = threadIdx.x + blockIdx.x*blockDim.x; bool crossed = false; float crossing_time; if (i<size) { // Test for threshold crossing const auto cv = cv_index[i]; const auto cell = cv_to_cell[cv]; const auto v_prev = prev_values[i]; const auto v = values[cv]; const auto thresh = thresholds[i]; if (!is_crossed[i]) { if (v>=thresh) { // The threshold has been passed, so estimate the time using // linear interpolation auto pos = (thresh - v_prev)/(v - v_prev); crossing_time = lerp(t_before[cell], t_after[cell], pos); is_crossed[i] = 1; crossed = true; } } else if (v<thresh) { is_crossed[i]=0; } prev_values[i] = v; } if (crossed) { push_back(stack, {fvm_size_type(i), crossing_time}); } } __global__ extern void reset_crossed_impl( int size, fvm_index_type* is_crossed, const fvm_index_type* cv_index, const fvm_value_type* values, const fvm_value_type* thresholds) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i<size) { is_crossed[i] = values[cv_index[i]] >= thresholds[i]; } } } // namespace kernel void test_thresholds_impl( int size, const fvm_index_type* cv_to_cell, const fvm_value_type* t_after, const fvm_value_type* t_before, stack_storage<threshold_crossing>& stack, fvm_index_type* is_crossed, fvm_value_type* prev_values, const fvm_index_type* cv_index, const fvm_value_type* values, const fvm_value_type* thresholds) { if (size>0) { constexpr int block_dim = 128; const int grid_dim = impl::block_count(size, block_dim); kernel::test_thresholds_impl<<<grid_dim, block_dim>>>( size, cv_to_cell, t_after, t_before, stack, is_crossed, prev_values, cv_index, values, thresholds); } } void reset_crossed_impl( int size, fvm_index_type* is_crossed, const fvm_index_type* cv_index, const fvm_value_type* values, const fvm_value_type* thresholds) { if (size>0) { constexpr int block_dim = 128; const int grid_dim = impl::block_count(size, block_dim); kernel::reset_crossed_impl<<<grid_dim, block_dim>>>(size, is_crossed, cv_index, values, thresholds); } } } // namespace gpu } // namespace arb
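The crossing time in test_thresholds_impl is estimated by linear interpolation: pos measures how far between the previous and current samples the threshold sits, and the fma-based lerp maps that fraction onto the [t_before, t_after] interval. A small host-side sketch of that arithmetic, with illustrative numbers that are not taken from any real simulation:

#include <cmath>
#include <cstdio>

// Same fma-based lerp as in the kernel namespace above, usable on the host.
template <typename T>
inline T lerp(T a, T b, T u) { return std::fma(u, b, std::fma(-u, a, a)); }

int main()
{
    // Illustrative values only: the watched value rises from -45 to -35
    // between t=10.0 ms and t=10.025 ms, and the threshold is -40.
    double v_prev = -45.0, v = -35.0, thresh = -40.0;
    double t_before = 10.0, t_after = 10.025;

    double pos = (thresh - v_prev) / (v - v_prev);        // 0.5
    double crossing_time = lerp(t_before, t_after, pos);  // 10.0125 ms
    printf("crossing at t = %.4f ms\n", crossing_time);
    return 0;
}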
b19aa5bcdb1960b161cbd4b83ddc3134de8c6127.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2013 @author Azzam Haidar @author Tingxing Dong */ #include "common_magma.h" #include "batched_kernel_param.h" //================================================================================================= //================================================================================================= // AUXILIARY ROUTINE TO COMPUTE PIV FINAL DESTINATION FOR THE CURRENT STEP //================================================================================================= //================================================================================================= //================================================================================================= static __device__ void setup_pivinfo_devfunc(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb) { int tid = threadIdx.x; int nchunk = (m-1)/MAX_NTHREADS + 1; // initialize pivinfo (could be done in a separate kernel using multiple thread block for(int s =0 ; s < nchunk; s++) { if( (tid + s * MAX_NTHREADS < m) && (tid < MAX_NTHREADS) ) pivinfo[tid + s * MAX_NTHREADS] = tid + s * MAX_NTHREADS + 1; } __syncthreads(); if(tid==0) { int i, itsreplacement, mynewrowid; for(i=0; i<nb; i++){ mynewrowid = ipiv[i]-1; //-1 to get the index in C itsreplacement = pivinfo[mynewrowid]; pivinfo[mynewrowid] = pivinfo[i]; pivinfo[i] = itsreplacement; } } } //================================================================================================= __global__ void setup_pivinfo_kernel_batched(magma_int_t **pivinfo_array, magma_int_t **ipiv_array, int m, int nb) { int batchid = blockIdx.x; setup_pivinfo_devfunc(pivinfo_array[batchid], ipiv_array[batchid], m, nb); } //================================================================================================= //================================================================================================= __global__ void setup_pivinfo_kernel(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb) { setup_pivinfo_devfunc(pivinfo, ipiv, m, nb); } //================================================================================================= //TODO add description //================================================================================================= extern "C" void setup_pivinfo_batched( magma_int_t **pivinfo_array, magma_int_t **ipiv_array, magma_int_t m, magma_int_t nb, magma_int_t batchCount, magma_queue_t queue) { if(nb == 0 ) return ; hipLaunchKernelGGL(( setup_pivinfo_kernel_batched), dim3(batchCount), dim3(min(m, MAX_NTHREADS)), 0, queue, pivinfo_array, ipiv_array, m, nb); } //================================================================================================= //TODO add description //================================================================================================= extern "C" void setup_pivinfo( magma_int_t *pivinfo, magma_int_t *ipiv, magma_int_t m, magma_int_t nb, magma_queue_t queue) { if(nb == 0 ) return ; hipLaunchKernelGGL(( setup_pivinfo_kernel), dim3(1), dim3(min(m, MAX_NTHREADS)), 0, queue, pivinfo, ipiv, m, nb); } //================================================================================================= //================================================================================================= 
//================================================================================================= // AUXILIARY ROUTINE TO ADJUST IPIV //================================================================================================= //================================================================================================= //================================================================================================= static __device__ void adjust_ipiv_devfunc(magma_int_t *ipiv, int m, int offset) { int tid = threadIdx.x; if(tid < m) { ipiv[tid] += offset; } } //================================================================================================= __global__ void adjust_ipiv_kernel_batched(magma_int_t **ipiv_array, int m, int offset) { int batchid = blockIdx.x; adjust_ipiv_devfunc(ipiv_array[batchid], m, offset); } //================================================================================================= //================================================================================================= __global__ void adjust_ipiv_kernel(magma_int_t *ipiv, int m, int offset) { adjust_ipiv_devfunc(ipiv, m, offset); } //================================================================================================= //TODO add description //================================================================================================= extern "C" void adjust_ipiv_batched( magma_int_t **ipiv_array, magma_int_t m, magma_int_t offset, magma_int_t batchCount, magma_queue_t queue) { if(offset == 0 ) return ; if( m > MAX_NTHREADS) { printf(" adjust_ipiv_batched_q m=%d > %d, not supported \n", m, MAX_NTHREADS); return; } hipLaunchKernelGGL(( adjust_ipiv_kernel_batched), dim3(batchCount), dim3(m), 0, queue, ipiv_array, m, offset); } //================================================================================================= //TODO add description //================================================================================================= extern "C" void adjust_ipiv( magma_int_t *ipiv, magma_int_t m, magma_int_t offset, magma_queue_t queue) { if(offset == 0 ) return ; if( m > 1024) { printf(" adjust_ipiv_q m=%d > %d, not supported \n", m, MAX_NTHREADS); return; } hipLaunchKernelGGL(( adjust_ipiv_kernel), dim3(1), dim3(m), 0, queue, ipiv, m, offset); }
b19aa5bcdb1960b161cbd4b83ddc3134de8c6127.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2013 @author Azzam Haidar @author Tingxing Dong */ #include "common_magma.h" #include "batched_kernel_param.h" //================================================================================================= //================================================================================================= // AUXILIARY ROUTINE TO COMPUTE PIV FINAL DESTINATION FOR THE CURRENT STEP //================================================================================================= //================================================================================================= //================================================================================================= static __device__ void setup_pivinfo_devfunc(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb) { int tid = threadIdx.x; int nchunk = (m-1)/MAX_NTHREADS + 1; // initialize pivinfo (could be done in a separate kernel using multiple thread block for(int s =0 ; s < nchunk; s++) { if( (tid + s * MAX_NTHREADS < m) && (tid < MAX_NTHREADS) ) pivinfo[tid + s * MAX_NTHREADS] = tid + s * MAX_NTHREADS + 1; } __syncthreads(); if(tid==0) { int i, itsreplacement, mynewrowid; for(i=0; i<nb; i++){ mynewrowid = ipiv[i]-1; //-1 to get the index in C itsreplacement = pivinfo[mynewrowid]; pivinfo[mynewrowid] = pivinfo[i]; pivinfo[i] = itsreplacement; } } } //================================================================================================= __global__ void setup_pivinfo_kernel_batched(magma_int_t **pivinfo_array, magma_int_t **ipiv_array, int m, int nb) { int batchid = blockIdx.x; setup_pivinfo_devfunc(pivinfo_array[batchid], ipiv_array[batchid], m, nb); } //================================================================================================= //================================================================================================= __global__ void setup_pivinfo_kernel(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb) { setup_pivinfo_devfunc(pivinfo, ipiv, m, nb); } //================================================================================================= //TODO add description //================================================================================================= extern "C" void setup_pivinfo_batched( magma_int_t **pivinfo_array, magma_int_t **ipiv_array, magma_int_t m, magma_int_t nb, magma_int_t batchCount, magma_queue_t queue) { if(nb == 0 ) return ; setup_pivinfo_kernel_batched<<<batchCount, min(m, MAX_NTHREADS), 0, queue>>>(pivinfo_array, ipiv_array, m, nb); } //================================================================================================= //TODO add description //================================================================================================= extern "C" void setup_pivinfo( magma_int_t *pivinfo, magma_int_t *ipiv, magma_int_t m, magma_int_t nb, magma_queue_t queue) { if(nb == 0 ) return ; setup_pivinfo_kernel<<<1, min(m, MAX_NTHREADS), 0, queue>>>(pivinfo, ipiv, m, nb); } //================================================================================================= //================================================================================================= //================================================================================================= // AUXILIARY ROUTINE TO ADJUST IPIV //================================================================================================= 
//================================================================================================= //================================================================================================= static __device__ void adjust_ipiv_devfunc(magma_int_t *ipiv, int m, int offset) { int tid = threadIdx.x; if(tid < m) { ipiv[tid] += offset; } } //================================================================================================= __global__ void adjust_ipiv_kernel_batched(magma_int_t **ipiv_array, int m, int offset) { int batchid = blockIdx.x; adjust_ipiv_devfunc(ipiv_array[batchid], m, offset); } //================================================================================================= //================================================================================================= __global__ void adjust_ipiv_kernel(magma_int_t *ipiv, int m, int offset) { adjust_ipiv_devfunc(ipiv, m, offset); } //================================================================================================= //TODO add description //================================================================================================= extern "C" void adjust_ipiv_batched( magma_int_t **ipiv_array, magma_int_t m, magma_int_t offset, magma_int_t batchCount, magma_queue_t queue) { if(offset == 0 ) return ; if( m > MAX_NTHREADS) { printf(" adjust_ipiv_batched_q m=%d > %d, not supported \n", m, MAX_NTHREADS); return; } adjust_ipiv_kernel_batched<<<batchCount, m, 0, queue>>>(ipiv_array, m, offset); } //================================================================================================= //TODO add description //================================================================================================= extern "C" void adjust_ipiv( magma_int_t *ipiv, magma_int_t m, magma_int_t offset, magma_queue_t queue) { if(offset == 0 ) return ; if( m > 1024) { printf(" adjust_ipiv_q m=%d > %d, not supported \n", m, MAX_NTHREADS); return; } adjust_ipiv_kernel<<<1, m, 0, queue>>>(ipiv, m, offset); }
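setup_pivinfo_devfunc above applies the LAPACK-style interchanges in ipiv (1-based, sequential swaps) to the identity permutation, so that pivinfo[i] ends up holding the 1-based index of the original row that lands in position i. The serial host sketch below performs the same loop with a small worked example; setup_pivinfo_host is a hypothetical name, not a MAGMA routine.

#include <cstdio>

// Apply the LAPACK-style row interchanges in ipiv (1-based) to the identity
// permutation: afterwards pivinfo[i] is the 1-based index of the original row
// now sitting in position i.
static void setup_pivinfo_host(int* pivinfo, const int* ipiv, int m, int nb)
{
    for (int i = 0; i < m; ++i) pivinfo[i] = i + 1;
    for (int i = 0; i < nb; ++i) {
        int newrow = ipiv[i] - 1;   // -1 to get the C index
        int tmp = pivinfo[newrow];
        pivinfo[newrow] = pivinfo[i];
        pivinfo[i] = tmp;
    }
}

int main()
{
    // Example: 4 rows, two pivot steps that both select row 3 (1-based).
    int ipiv[2] = {3, 3};
    int pivinfo[4];
    setup_pivinfo_host(pivinfo, ipiv, 4, 2);
    for (int i = 0; i < 4; ++i) printf("pivinfo[%d] = %d\n", i, pivinfo[i]);
    // Prints 3 1 2 4: original row 3 ends up first, rows 1 and 2 shift down.
    return 0;
}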
8461760c4291917bf42ed579f5b8a152a0e98e75.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void loop()
{
  /*
   * This idiomatic expression gives each thread
   * a unique index within the entire grid.
   */
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  printf("%d\n", i);
}

int main()
{
  /*
   * Additional execution configurations that would
   * work and meet the exercise's constraints are:
   *
   * <<<5, 2>>>
   * <<<10, 1>>>
   */
  hipLaunchKernelGGL(( loop), dim3(2), dim3(5), 0, 0, );
  hipDeviceSynchronize();
}
8461760c4291917bf42ed579f5b8a152a0e98e75.cu
#include <stdio.h>

__global__ void loop()
{
  /*
   * This idiomatic expression gives each thread
   * a unique index within the entire grid.
   */
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  printf("%d\n", i);
}

int main()
{
  /*
   * Additional execution configurations that would
   * work and meet the exercise's constraints are:
   *
   * <<<5, 2>>>
   * <<<10, 1>>>
   */
  loop<<<2, 5>>>();
  cudaDeviceSynchronize();
}
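The exercise above hard-codes launch configurations that cover exactly 10 threads. When the problem size is not a multiple of the block size, the usual idiom is a ceil-divide grid plus a bounds check in the kernel; the sketch below is a generic illustration of that pattern, not part of the exercise files.

#include <stdio.h>

__global__ void loop_bounded(int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)   // guard against the padding threads in the last block
    printf("%d\n", i);
}

int main()
{
  int n = 10;
  int threads_per_block = 4;                                     // not a divisor of n
  int blocks = (n + threads_per_block - 1) / threads_per_block;  // ceil(n / tpb) = 3
  loop_bounded<<<blocks, threads_per_block>>>(n);
  cudaDeviceSynchronize();
}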
closest_hit.hip
// !!! This is a file automatically generated by hipify!!! #include "common_hip.cuh" // Root object of the scene. rtDeclareVariable(rtObject, root, , ); // The current ray and its payload. rtDeclareVariable(optix::Ray, ray, rtCurrentRay , ); rtDeclareVariable(RayPayload, payload, rtPayload, ); // The distance from the ray origin to where the intersection was detected. rtDeclareVariable(float, ray_t, rtIntersectionDistance, ); // Point lights in the scene. rtBuffer<PointLight> lights; // Properties of the hit surface's material. rtDeclareVariable(float3, mat_color , , ); rtDeclareVariable(float, mat_emission , , ); // 0 = no emission , 1 = full emission rtDeclareVariable(float, mat_metalness , , ); // 0 = dieletric , 1 = metal rtDeclareVariable(float, mat_shininess , , ); // 0 = smeared highlight , 1 = dense highlight rtDeclareVariable(float, mat_transparency , , ); // 0 = opaque , 1 = transparent rtDeclareVariable(float, mat_reflectivity , , ); // 0 = diffuse , 1 = mirror rtDeclareVariable(float, mat_fresnel , , ); // 0 = only absorptions , 1 = only reflections (when looking from directly above) rtDeclareVariable(float, mat_refractive_index, , ); // 1.0 = air material // Attributes from intersection test. rtDeclareVariable(optix::float3, attr_geo_normal, attribute GEO_NORMAL, ); rtDeclareVariable(optix::float3, attr_tangent, attribute TANGENT , ); rtDeclareVariable(optix::float3, attr_normal, attribute NORMAL , ); rtDeclareVariable(optix::float3, attr_uv, attribute TEX_UV , ); RT_FUNCTION float fresnel(float wo_dot_h) { float F = mat_fresnel + (1.0f - mat_fresnel) * pow(1.0f - wo_dot_h, 5.0f); return F; } RT_FUNCTION float torrance_sparrow_brdf(float n_dot_wi, float n_dot_wo, float n_dot_wh, float wo_dot_wh) { float F = fresnel(wo_dot_wh); float D = ((mat_shininess + 2.0f) / (2.0f * M_PIf)) * pow(n_dot_wh, mat_shininess); float G = min(1.0f, min(2.0f * n_dot_wh * n_dot_wo / wo_dot_wh, 2.0f * n_dot_wh * n_dot_wi / wo_dot_wh)); float denominator = 4.0f * n_dot_wo * n_dot_wi; return F * D * G / denominator; } RT_FUNCTION float3 direct_illumination(float3 const& wo, float3 const& hit, float3 const& n) { float3 illumination = make_float3(0.0f); // Do not illuminate the backface of a triangle. float n_dot_wo = optix::dot(n, wo); if (n_dot_wo <= 0.0f) { return illumination; } for (int i = 0; i < lights.size(); i++) { PointLight light = lights[i]; // Ensure that the light could illuminate the front face. float3 wi = optix::normalize(light.position - hit); float n_dot_wi = optix::dot(n, wi); if (n_dot_wi <= 0.0f) { continue; } // Setup a shadow ray from the hit/intersection point, towards the current point light. float dist_to_light = optix::length(light.position - hit); optix::Ray shadow_ray(hit, wi, 1, EPSILON, dist_to_light); // Shoot the shadow ray ShadowRayPayload shadow_payload; shadow_payload.attenuation = make_float3(1.0f); rtTrace(root, shadow_ray, shadow_payload); // Ensure that the shadow ray is not entirely absorbed along the way. 
if (fmaxf(shadow_payload.attenuation) <= 0.0f) { continue; } // Compute direct illumination from the light float3 light_illumination = shadow_payload.attenuation * (light.intensity / pow(dist_to_light, 2.0f)) * light.color; // Compute BRDF float3 wh = optix::normalize(wo + wi); float n_dot_wh = optix::dot(n, wh); float wo_dot_wh = optix::dot(wo, wh); float F = fresnel(wo_dot_wh); float D = ((mat_shininess + 2.0f) / (2.0f * M_PIf)) * pow(n_dot_wh, mat_shininess); float G = min(1.0f, min(2.0f * n_dot_wh * n_dot_wo / wo_dot_wh, 2.0f * n_dot_wh * n_dot_wi / wo_dot_wh)); float denominator = 4.0f * n_dot_wo * n_dot_wi; float brdf = F * D * G / denominator; // Material models float3 diffuse_model = mat_color * M_1_PIf * n_dot_wi * light_illumination; float3 dieletric_model = brdf * n_dot_wi * light_illumination + (1.0f - F) * diffuse_model; float3 metal_model = brdf * mat_color * n_dot_wi * light_illumination; float3 microfacet_model = mat_metalness * metal_model + (1.0f - mat_metalness) * dieletric_model; // Apply a linear blend between a perfectly diffuse surface and a microfacet brdf. float3 material_model = mat_reflectivity * microfacet_model + (1.0f - mat_reflectivity) * diffuse_model; illumination += material_model; } return illumination; } RT_FUNCTION float3 indirect_illumination(float3 const& wo, float3 const& hit, float3 const& n) { float3 illumination = make_float3(0.0f); // Indirect illumination from reflections if (0.0f < mat_reflectivity && payload.recursion_depth < 3) { float3 wi = optix::reflect(ray.direction, n); float3 wh = optix::normalize(wo + wi); // Fresnel float wo_dot_wh = max(0.01f, optix::dot(wo, wh)); float F = mat_reflectivity * fresnel(wo_dot_wh); float importance = payload.importance * optix::luminance(make_float3(F)); float importance_threshold = 0.1f; if (importance_threshold <= importance) { // Setup a reflection ray from the hit/intersection point optix::Ray reflection_ray(hit, wi, 0, EPSILON, RT_DEFAULT_MAX); // Shoot the reflection ray RayPayload reflection_payload; reflection_payload.importance = importance; reflection_payload.recursion_depth = payload.recursion_depth + 1; rtTrace(root, reflection_ray, reflection_payload); float3 mirror_model = F * reflection_payload.radiance; float3 metal_model = mat_color * F * reflection_payload.radiance; float3 material_model = mat_metalness * metal_model + (1.0f - mat_metalness) * mirror_model; illumination += material_model; } } // Indirect illumination from refractions if (0.0f < mat_transparency && payload.recursion_depth < 5) { float3 wi; bool total_internal_reflection = !optix::refract(wi, ray.direction, n, mat_refractive_index); if (total_internal_reflection) { wi = optix::reflect(ray.direction, n); } // External or internal? 
float cos_theta = optix::dot(ray.direction, n); if (cos_theta < 0.0f) { // wi comes from the outside air cos_theta = optix::dot(wo, n); } else { // wi comes from the inside of the material cos_theta = optix::dot(wi, n); } float F = fresnel(cos_theta); float importance = payload.importance * (1.0f - F) * optix::luminance(make_float3(1.0f)); const float importance_threshold = 0.1f; if (importance_threshold <= importance) { optix::Ray refraction_ray(hit, wi, 0, EPSILON, RT_DEFAULT_MAX); // Shoot the refraction ray RayPayload refraction_payload; refraction_payload.importance = payload.importance; refraction_payload.recursion_depth = payload.recursion_depth + 1; rtTrace(root, refraction_ray, refraction_payload); illumination += mat_transparency * (1.0f - F) * refraction_payload.radiance; } } return illumination; } RT_PROGRAM void closest_hit() { // Transform the (unnormalized) object space normals into world space. float3 geo_normal = optix::normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, attr_geo_normal)); float3 normal = optix::normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, attr_normal)); float3 wo = -ray.direction; float3 hit = ray.origin + ray_t * ray.direction; // Base color, regardless if the surface is exposed to light or not. float3 color = make_float3(0.0f); color += mat_color * mat_emission; color += direct_illumination(wo, hit, normal); color += indirect_illumination(wo, hit, normal); payload.radiance = color; }
closest_hit.cu
#include "common.cuh" // Root object of the scene. rtDeclareVariable(rtObject, root, , ); // The current ray and its payload. rtDeclareVariable(optix::Ray, ray, rtCurrentRay , ); rtDeclareVariable(RayPayload, payload, rtPayload, ); // The distance from the ray origin to where the intersection was detected. rtDeclareVariable(float, ray_t, rtIntersectionDistance, ); // Point lights in the scene. rtBuffer<PointLight> lights; // Properties of the hit surface's material. rtDeclareVariable(float3, mat_color , , ); rtDeclareVariable(float, mat_emission , , ); // 0 = no emission , 1 = full emission rtDeclareVariable(float, mat_metalness , , ); // 0 = dieletric , 1 = metal rtDeclareVariable(float, mat_shininess , , ); // 0 = smeared highlight , 1 = dense highlight rtDeclareVariable(float, mat_transparency , , ); // 0 = opaque , 1 = transparent rtDeclareVariable(float, mat_reflectivity , , ); // 0 = diffuse , 1 = mirror rtDeclareVariable(float, mat_fresnel , , ); // 0 = only absorptions , 1 = only reflections (when looking from directly above) rtDeclareVariable(float, mat_refractive_index, , ); // 1.0 = air material // Attributes from intersection test. rtDeclareVariable(optix::float3, attr_geo_normal, attribute GEO_NORMAL, ); rtDeclareVariable(optix::float3, attr_tangent, attribute TANGENT , ); rtDeclareVariable(optix::float3, attr_normal, attribute NORMAL , ); rtDeclareVariable(optix::float3, attr_uv, attribute TEX_UV , ); RT_FUNCTION float fresnel(float wo_dot_h) { float F = mat_fresnel + (1.0f - mat_fresnel) * pow(1.0f - wo_dot_h, 5.0f); return F; } RT_FUNCTION float torrance_sparrow_brdf(float n_dot_wi, float n_dot_wo, float n_dot_wh, float wo_dot_wh) { float F = fresnel(wo_dot_wh); float D = ((mat_shininess + 2.0f) / (2.0f * M_PIf)) * pow(n_dot_wh, mat_shininess); float G = min(1.0f, min(2.0f * n_dot_wh * n_dot_wo / wo_dot_wh, 2.0f * n_dot_wh * n_dot_wi / wo_dot_wh)); float denominator = 4.0f * n_dot_wo * n_dot_wi; return F * D * G / denominator; } RT_FUNCTION float3 direct_illumination(float3 const& wo, float3 const& hit, float3 const& n) { float3 illumination = make_float3(0.0f); // Do not illuminate the backface of a triangle. float n_dot_wo = optix::dot(n, wo); if (n_dot_wo <= 0.0f) { return illumination; } for (int i = 0; i < lights.size(); i++) { PointLight light = lights[i]; // Ensure that the light could illuminate the front face. float3 wi = optix::normalize(light.position - hit); float n_dot_wi = optix::dot(n, wi); if (n_dot_wi <= 0.0f) { continue; } // Setup a shadow ray from the hit/intersection point, towards the current point light. float dist_to_light = optix::length(light.position - hit); optix::Ray shadow_ray(hit, wi, 1, EPSILON, dist_to_light); // Shoot the shadow ray ShadowRayPayload shadow_payload; shadow_payload.attenuation = make_float3(1.0f); rtTrace(root, shadow_ray, shadow_payload); // Ensure that the shadow ray is not entirely absorbed along the way. 
if (fmaxf(shadow_payload.attenuation) <= 0.0f) { continue; } // Compute direct illumination from the light float3 light_illumination = shadow_payload.attenuation * (light.intensity / pow(dist_to_light, 2.0f)) * light.color; // Compute BRDF float3 wh = optix::normalize(wo + wi); float n_dot_wh = optix::dot(n, wh); float wo_dot_wh = optix::dot(wo, wh); float F = fresnel(wo_dot_wh); float D = ((mat_shininess + 2.0f) / (2.0f * M_PIf)) * pow(n_dot_wh, mat_shininess); float G = min(1.0f, min(2.0f * n_dot_wh * n_dot_wo / wo_dot_wh, 2.0f * n_dot_wh * n_dot_wi / wo_dot_wh)); float denominator = 4.0f * n_dot_wo * n_dot_wi; float brdf = F * D * G / denominator; // Material models float3 diffuse_model = mat_color * M_1_PIf * n_dot_wi * light_illumination; float3 dieletric_model = brdf * n_dot_wi * light_illumination + (1.0f - F) * diffuse_model; float3 metal_model = brdf * mat_color * n_dot_wi * light_illumination; float3 microfacet_model = mat_metalness * metal_model + (1.0f - mat_metalness) * dieletric_model; // Apply a linear blend between a perfectly diffuse surface and a microfacet brdf. float3 material_model = mat_reflectivity * microfacet_model + (1.0f - mat_reflectivity) * diffuse_model; illumination += material_model; } return illumination; } RT_FUNCTION float3 indirect_illumination(float3 const& wo, float3 const& hit, float3 const& n) { float3 illumination = make_float3(0.0f); // Indirect illumination from reflections if (0.0f < mat_reflectivity && payload.recursion_depth < 3) { float3 wi = optix::reflect(ray.direction, n); float3 wh = optix::normalize(wo + wi); // Fresnel float wo_dot_wh = max(0.01f, optix::dot(wo, wh)); float F = mat_reflectivity * fresnel(wo_dot_wh); float importance = payload.importance * optix::luminance(make_float3(F)); float importance_threshold = 0.1f; if (importance_threshold <= importance) { // Setup a reflection ray from the hit/intersection point optix::Ray reflection_ray(hit, wi, 0, EPSILON, RT_DEFAULT_MAX); // Shoot the reflection ray RayPayload reflection_payload; reflection_payload.importance = importance; reflection_payload.recursion_depth = payload.recursion_depth + 1; rtTrace(root, reflection_ray, reflection_payload); float3 mirror_model = F * reflection_payload.radiance; float3 metal_model = mat_color * F * reflection_payload.radiance; float3 material_model = mat_metalness * metal_model + (1.0f - mat_metalness) * mirror_model; illumination += material_model; } } // Indirect illumination from refractions if (0.0f < mat_transparency && payload.recursion_depth < 5) { float3 wi; bool total_internal_reflection = !optix::refract(wi, ray.direction, n, mat_refractive_index); if (total_internal_reflection) { wi = optix::reflect(ray.direction, n); } // External or internal? 
float cos_theta = optix::dot(ray.direction, n); if (cos_theta < 0.0f) { // wi comes from the outside air cos_theta = optix::dot(wo, n); } else { // wi comes from the inside of the material cos_theta = optix::dot(wi, n); } float F = fresnel(cos_theta); float importance = payload.importance * (1.0f - F) * optix::luminance(make_float3(1.0f)); const float importance_threshold = 0.1f; if (importance_threshold <= importance) { optix::Ray refraction_ray(hit, wi, 0, EPSILON, RT_DEFAULT_MAX); // Shoot the refraction ray RayPayload refraction_payload; refraction_payload.importance = payload.importance; refraction_payload.recursion_depth = payload.recursion_depth + 1; rtTrace(root, refraction_ray, refraction_payload); illumination += mat_transparency * (1.0f - F) * refraction_payload.radiance; } } return illumination; } RT_PROGRAM void closest_hit() { // Transform the (unnormalized) object space normals into world space. float3 geo_normal = optix::normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, attr_geo_normal)); float3 normal = optix::normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, attr_normal)); float3 wo = -ray.direction; float3 hit = ray.origin + ray_t * ray.direction; // Base color, regardless if the surface is exposed to light or not. float3 color = make_float3(0.0f); color += mat_color * mat_emission; color += direct_illumination(wo, hit, normal); color += indirect_illumination(wo, hit, normal); payload.radiance = color; }
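The fresnel() helper in closest_hit is Schlick's approximation: it blends the material's base reflectance toward full reflection as the view direction grazes the surface. The host sketch below evaluates the same formula with the base reflectance passed as an argument; the 0.04 value is an illustrative dielectric reflectance, not something read from this scene.

#include <cmath>
#include <cstdio>

// Schlick's approximation, as in fresnel() above, with the base reflectance
// passed explicitly instead of read from an OptiX material variable.
static float schlick(float r0, float wo_dot_h)
{
    return r0 + (1.0f - r0) * powf(1.0f - wo_dot_h, 5.0f);
}

int main()
{
    const float r0 = 0.04f;  // illustrative dielectric base reflectance
    printf("head-on (cos=1.0): F = %.4f\n", schlick(r0, 1.0f));  // stays at r0
    printf("grazing (cos=0.1): F = %.4f\n", schlick(r0, 0.1f));  // rises sharply toward 1
    return 0;
}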
cfed17d403eef74264ea9da6257abea63413ea29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions mixed zc -> ds */ #include "common_magma.h" extern "C" __global__ void zcaxpycp_special(cuFloatComplex *R, hipDoubleComplex *X, magma_int_t M, hipDoubleComplex *B,hipDoubleComplex *W ) { const magma_int_t ibx = blockIdx.x * 64; const magma_int_t idt = threadIdx.x; X += ibx+idt; R += ibx+idt; B += ibx+idt; W += ibx+idt; X[0] = MAGMA_Z_ADD( X[0], cuComplexFloatToDouble(R[0]) ); W[0] = B[0]; } extern "C" __global__ void zaxpycp_special(hipDoubleComplex *R, hipDoubleComplex *X, magma_int_t M, hipDoubleComplex *B) { const magma_int_t ibx = blockIdx.x * 64; const magma_int_t idt = threadIdx.x; X += ibx+idt; R += ibx+idt; B += ibx+idt; X[0] = MAGMA_Z_ADD( X[0], R[0] ); R[0] = B[0]; } extern "C" __global__ void zcaxpycp_generic(cuFloatComplex *R, hipDoubleComplex *X, magma_int_t M, hipDoubleComplex *B,hipDoubleComplex *W ) { const magma_int_t ibx = blockIdx.x * 64; const magma_int_t idt = threadIdx.x; if( ( ibx + idt ) < M ) { X += ibx+idt; R += ibx+idt; B += ibx+idt; W += ibx+idt; } else{ X +=(M-1); R +=(M-1); B +=(M-1); W +=(M-1); } X[0] = MAGMA_Z_ADD( X[0], cuComplexFloatToDouble( R[0] ) ); W[0] = B[0]; } extern "C" __global__ void zaxpycp_generic(hipDoubleComplex *R, hipDoubleComplex *X, magma_int_t M, hipDoubleComplex *B) { const magma_int_t ibx = blockIdx.x * 64; const magma_int_t idt = threadIdx.x; if( ( ibx + idt ) < M ) { X += ibx+idt; R += ibx+idt; B += ibx+idt; } else{ X +=(M-1); R +=(M-1); B +=(M-1); } X[0] = MAGMA_Z_ADD( X[0], R[0] ); R[0] = B[0]; } extern "C" void magmablas_zcaxpycp(cuFloatComplex *R, hipDoubleComplex *X, magma_int_t M, hipDoubleComplex *B, hipDoubleComplex *W) { dim3 threads( 64, 1 ); dim3 grid(M/64+(M%64!=0),1); if( M %64 == 0 ) { hipLaunchKernelGGL(( zcaxpycp_special) , dim3(grid), dim3(threads), 0, magma_stream , R, X, M, B, W) ; } else{ hipLaunchKernelGGL(( zcaxpycp_generic) , dim3(grid), dim3(threads), 0, magma_stream , R, X, M, B, W) ; } } extern "C" void magmablas_zaxpycp(hipDoubleComplex *R, hipDoubleComplex *X, magma_int_t M, hipDoubleComplex *B) { dim3 threads( 64, 1 ); dim3 grid(M/64+(M%64!=0),1); if( M %64 == 0 ) { hipLaunchKernelGGL(( zaxpycp_special) , dim3(grid), dim3(threads), 0, magma_stream , R, X, M, B) ; } else{ hipLaunchKernelGGL(( zaxpycp_generic) , dim3(grid), dim3(threads), 0, magma_stream , R, X, M, B) ; } }
cfed17d403eef74264ea9da6257abea63413ea29.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions mixed zc -> ds */ #include "common_magma.h" extern "C" __global__ void zcaxpycp_special(cuFloatComplex *R, cuDoubleComplex *X, magma_int_t M, cuDoubleComplex *B,cuDoubleComplex *W ) { const magma_int_t ibx = blockIdx.x * 64; const magma_int_t idt = threadIdx.x; X += ibx+idt; R += ibx+idt; B += ibx+idt; W += ibx+idt; X[0] = MAGMA_Z_ADD( X[0], cuComplexFloatToDouble(R[0]) ); W[0] = B[0]; } extern "C" __global__ void zaxpycp_special(cuDoubleComplex *R, cuDoubleComplex *X, magma_int_t M, cuDoubleComplex *B) { const magma_int_t ibx = blockIdx.x * 64; const magma_int_t idt = threadIdx.x; X += ibx+idt; R += ibx+idt; B += ibx+idt; X[0] = MAGMA_Z_ADD( X[0], R[0] ); R[0] = B[0]; } extern "C" __global__ void zcaxpycp_generic(cuFloatComplex *R, cuDoubleComplex *X, magma_int_t M, cuDoubleComplex *B,cuDoubleComplex *W ) { const magma_int_t ibx = blockIdx.x * 64; const magma_int_t idt = threadIdx.x; if( ( ibx + idt ) < M ) { X += ibx+idt; R += ibx+idt; B += ibx+idt; W += ibx+idt; } else{ X +=(M-1); R +=(M-1); B +=(M-1); W +=(M-1); } X[0] = MAGMA_Z_ADD( X[0], cuComplexFloatToDouble( R[0] ) ); W[0] = B[0]; } extern "C" __global__ void zaxpycp_generic(cuDoubleComplex *R, cuDoubleComplex *X, magma_int_t M, cuDoubleComplex *B) { const magma_int_t ibx = blockIdx.x * 64; const magma_int_t idt = threadIdx.x; if( ( ibx + idt ) < M ) { X += ibx+idt; R += ibx+idt; B += ibx+idt; } else{ X +=(M-1); R +=(M-1); B +=(M-1); } X[0] = MAGMA_Z_ADD( X[0], R[0] ); R[0] = B[0]; } extern "C" void magmablas_zcaxpycp(cuFloatComplex *R, cuDoubleComplex *X, magma_int_t M, cuDoubleComplex *B, cuDoubleComplex *W) { dim3 threads( 64, 1 ); dim3 grid(M/64+(M%64!=0),1); if( M %64 == 0 ) { zcaxpycp_special <<< grid, threads, 0, magma_stream >>> ( R, X, M, B, W) ; } else{ zcaxpycp_generic <<< grid, threads, 0, magma_stream >>> ( R, X, M, B, W) ; } } extern "C" void magmablas_zaxpycp(cuDoubleComplex *R, cuDoubleComplex *X, magma_int_t M, cuDoubleComplex *B) { dim3 threads( 64, 1 ); dim3 grid(M/64+(M%64!=0),1); if( M %64 == 0 ) { zaxpycp_special <<< grid, threads, 0, magma_stream >>> ( R, X, M, B) ; } else{ zaxpycp_generic <<< grid, threads, 0, magma_stream >>> ( R, X, M, B) ; } }
404f27805d55189b168057ab1f324e9582057de1.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> // includes, kernels #include <scan.cu> // defines prescanArray() //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); // regression test functionality extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { CUT_DEVICE_INIT(); #ifndef __DEVICE_EMULATION__ int num_test_iterations = 100; int num_elements = 1000000; // can support large, non-power-of-2 arrays! #else int num_test_iterations = 1; int num_elements = 10000; // can support large, non-power-of-2 arrays! 
#endif cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &num_elements); cutGetCmdLineArgumenti( argc, (const char**) argv, "i", &num_test_iterations); unsigned int mem_size = sizeof( float) * num_elements; unsigned int timerGPU, timerCPU; CUT_SAFE_CALL(cutCreateTimer(&timerCPU)); CUT_SAFE_CALL(cutCreateTimer(&timerGPU)); // allocate host memory to store the input data float* h_data = (float*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = 1.0f;//(int)(10 * rand()/32768.f); } // compute reference solution float* reference = (float*) malloc( mem_size); cutStartTimer(timerCPU); for (int i = 0; i < num_test_iterations; i++) { computeGold( reference, h_data, num_elements); } cutStopTimer(timerCPU); // allocate device memory input and output arrays float* d_idata = NULL; float* d_odata = NULL; CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size)); // copy host memory to device input array CUDA_SAFE_CALL( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) ); // initialize all the other device arrays to be safe CUDA_SAFE_CALL( hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice) ); printf("Running parallel prefix sum (prescan) of %d elements\n", num_elements); printf("This version is work efficient (O(n) adds)\n"); printf("and has very few shared memory bank conflicts\n\n"); preallocBlockSums(num_elements); // run once to remove startup overhead prescanArray(d_odata, d_idata, num_elements); // Run the prescan cutStartTimer(timerGPU); for (int i = 0; i < num_test_iterations; i++) { //printf("prescanArray\n"); prescanArray(d_odata, d_idata, num_elements); } cutStopTimer(timerGPU); deallocBlockSums(); // copy result from device to host CUDA_SAFE_CALL(hipMemcpy( h_data, d_odata, sizeof(float) * num_elements, hipMemcpyDeviceToHost)); // If this is a regression test write the results to a file if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0); } else { // custom output handling when no regression test running // in this case check if the result is equivalent to the expected soluion unsigned int result_regtest = cutComparef( reference, h_data, num_elements); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); printf( "Average GPU execution time: %f ms\n", cutGetTimerValue(timerGPU) / num_test_iterations); printf( "CPU execution time: %f ms\n", cutGetTimerValue(timerCPU) / num_test_iterations); } printf("\nCheck out the CUDA Data Parallel Primitives Library for more on scan.\n"); printf("http://www.gpgpu.org/developer/cudpp\n"); // cleanup memory cutDeleteTimer(timerCPU); cutDeleteTimer(timerGPU); free( h_data); free( reference); hipFree( d_odata); hipFree( d_idata); }
404f27805d55189b168057ab1f324e9582057de1.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> // includes, kernels #include <scan.cu> // defines prescanArray() //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); // regression test functionality extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { CUT_DEVICE_INIT(); #ifndef __DEVICE_EMULATION__ int num_test_iterations = 100; int num_elements = 1000000; // can support large, non-power-of-2 arrays! #else int num_test_iterations = 1; int num_elements = 10000; // can support large, non-power-of-2 arrays! 
#endif cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &num_elements); cutGetCmdLineArgumenti( argc, (const char**) argv, "i", &num_test_iterations); unsigned int mem_size = sizeof( float) * num_elements; unsigned int timerGPU, timerCPU; CUT_SAFE_CALL(cutCreateTimer(&timerCPU)); CUT_SAFE_CALL(cutCreateTimer(&timerGPU)); // allocate host memory to store the input data float* h_data = (float*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = 1.0f;//(int)(10 * rand()/32768.f); } // compute reference solution float* reference = (float*) malloc( mem_size); cutStartTimer(timerCPU); for (int i = 0; i < num_test_iterations; i++) { computeGold( reference, h_data, num_elements); } cutStopTimer(timerCPU); // allocate device memory input and output arrays float* d_idata = NULL; float* d_odata = NULL; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size)); // copy host memory to device input array CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) ); // initialize all the other device arrays to be safe CUDA_SAFE_CALL( cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice) ); printf("Running parallel prefix sum (prescan) of %d elements\n", num_elements); printf("This version is work efficient (O(n) adds)\n"); printf("and has very few shared memory bank conflicts\n\n"); preallocBlockSums(num_elements); // run once to remove startup overhead prescanArray(d_odata, d_idata, num_elements); // Run the prescan cutStartTimer(timerGPU); for (int i = 0; i < num_test_iterations; i++) { //printf("prescanArray\n"); prescanArray(d_odata, d_idata, num_elements); } cutStopTimer(timerGPU); deallocBlockSums(); // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements, cudaMemcpyDeviceToHost)); // If this is a regression test write the results to a file if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0); } else { // custom output handling when no regression test running // in this case check if the result is equivalent to the expected soluion unsigned int result_regtest = cutComparef( reference, h_data, num_elements); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); printf( "Average GPU execution time: %f ms\n", cutGetTimerValue(timerGPU) / num_test_iterations); printf( "CPU execution time: %f ms\n", cutGetTimerValue(timerCPU) / num_test_iterations); } printf("\nCheck out the CUDA Data Parallel Primitives Library for more on scan.\n"); printf("http://www.gpgpu.org/developer/cudpp\n"); // cleanup memory cutDeleteTimer(timerCPU); cutDeleteTimer(timerGPU); free( h_data); free( reference); cudaFree( d_odata); cudaFree( d_idata); }
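The scan-test pair above (.hip versus .cu) differs mostly in runtime-API names: cudaMalloc, cudaMemcpy and cudaFree become hipMalloc, hipMemcpy and hipFree, while the cutil wrappers around them are left untouched. The sketch below isolates that rename pattern on a toy buffer round trip; it is illustrative only, is not part of the dataset, and all variable names in it are made up.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

int main() {
    const size_t n = 1024, bytes = n * sizeof(float);
    float* h = (float*)malloc(bytes);
    float* d = nullptr;
    for (size_t i = 0; i < n; ++i) h[i] = 1.0f;

    hipMalloc((void**)&d, bytes);                    // was: cudaMalloc
    hipMemcpy(d, h, bytes, hipMemcpyHostToDevice);   // was: cudaMemcpy(..., cudaMemcpyHostToDevice)
    hipMemcpy(h, d, bytes, hipMemcpyDeviceToHost);   // was: cudaMemcpy(..., cudaMemcpyDeviceToHost)
    hipFree(d);                                      // was: cudaFree

    printf("first element after round trip: %f\n", h[0]);
    free(h);
    return 0;
}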
63476494eea2cb236d583db6530941637b4f9212.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <bvh_common.h> #include "bvh_math.cuh" __global__ void computeMass_kernel(float * dst, float * mass0, float scale, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float m0 = mass0[ind]; if( m0 < 1e5f ) dst[ind] = m0 * scale; } __global__ void useAllAnchoredVelocity_kernel(float3 * vel, float3 * anchoredVel, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 va = anchoredVel[ind]; vel[ind] = va; } __global__ void useAnchoredVelocity_kernel(float3 * vel, float3 * anchoredVel, uint * anchored, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 va = anchoredVel[ind]; if(anchored[ind] > 0) vel[ind] = va; } __global__ void integrate_kernel(float3 * pos, float3 * vel, float3 * vela, float dt, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 anchoredVel = vela[ind]; vel[ind] = anchoredVel; float3_add_inplace(pos[ind], scale_float3_by(anchoredVel, dt)); } __global__ void updatePosition_kernel(float3 * pos, float3 * pos0, float3 * vel, float3 * anchoredVel, uint * anchor, float dt, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 va = anchoredVel[ind]; if(anchor[ind] > 0) vel[ind] = va; else va = vel[ind]; pos0[ind] = pos[ind]; float3_add_inplace(pos[ind], scale_float3_by(va, dt)); } __global__ void integrate2_kernel(float3 * pos, float3 * vel, float dt, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 va = vel[ind]; float3_add_inplace(pos[ind], scale_float3_by(va, dt)); } __global__ void impulseForce_kernel(float3 * force, float3 * deltaVel, float * mass, float dt, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; /* * F = J / dt * J = m * dv */ float m = mass[ind]; if(m > 1e5f) force[ind] = make_float3(0.f, 0.f, 0.f); else force[ind] = scale_float3_by(deltaVel[ind], m / dt); } __global__ void computeEnergy_kernel(float * energy, float * mass, float3 * vel, float defaultNodeMass, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float m = mass[ind]; if(m > 1e5f) m = defaultNodeMass; energy[ind] = float3_length2(vel[ind]) * m; } __global__ void computeLength_kernel(float * energy, float3 * vel, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; energy[ind] = float3_length2(vel[ind]); } __global__ void zeroVelocity_kernel(float3 * vel, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; vel[ind] = make_float3(0.f, 0.f, 0.f); } __global__ void setVelocity_kernel(float3 * deltaVel, float * mass, float x, float y, float z, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float m = mass[ind]; if(m > 1e5f) deltaVel[ind] = make_float3(0.f, 0.f, 0.f); else deltaVel[ind] = make_float3(x, y, z); } __global__ void copyPosition_kernel(float3 * dst, float3 * src, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; dst[ind] = src[ind]; } namespace masssystem { void computeMass(float * dst, float * mass0, float scale, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( computeMass_kernel), dim3(grid), dim3(block) 
, 0, 0, dst, mass0, scale, maxInd); } void useAnchoredVelocity(float3 * vel, float3 * anchoredVel, uint * anchored, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( useAnchoredVelocity_kernel), dim3(grid), dim3(block) , 0, 0, vel, anchoredVel, anchored, maxInd); } void useAllAnchoredVelocity(float3 * vel, float3 * anchoredVel, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( useAllAnchoredVelocity_kernel), dim3(grid), dim3(block) , 0, 0, vel, anchoredVel, maxInd); } void integrate(float3 * pos, float3 * prePos, float3 * vel, float3 * anchoredVel, uint * anchor, float dt, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( updatePosition_kernel), dim3(grid), dim3(block) , 0, 0, pos, prePos, vel, anchoredVel, anchor, dt, maxInd); } void integrateAllAnchored(float3 * pos, float3 * vel, float3 * vela, float dt, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( integrate_kernel), dim3(grid), dim3(block) , 0, 0, pos, vel, vela, dt, maxInd); } void integrateSimple(float3 * pos, float3 * vel, float dt, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( integrate2_kernel), dim3(grid), dim3(block) , 0, 0, pos, vel, dt, maxInd); } void impulseForce(float3 * force, float3 * deltaVel, float * mass, float dt, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( impulseForce_kernel), dim3(grid), dim3(block) , 0, 0, force, deltaVel, mass, dt, maxInd); } void computeEnergy(float * dst, float * mass, float3 * vel, float defaultNodeMass, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( computeEnergy_kernel), dim3(grid), dim3(block) , 0, 0, dst, mass, vel, defaultNodeMass, maxInd); } void computeLength(float * dst, float3 * vel, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( computeLength_kernel), dim3(grid), dim3(block) , 0, 0, dst, vel, maxInd); } void zeroVelocity(float3 * vel, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( zeroVelocity_kernel), dim3(grid), dim3(block) , 0, 0, vel, maxInd); } void setVelocity(float3 * deltaVel, float * mass, float x, float y, float z, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( setVelocity_kernel), dim3(grid), dim3(block) , 0, 0, deltaVel, mass, x, y, z, maxInd); } void copyPosition(float3 * dst, float3 * src, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( copyPosition_kernel), dim3(grid), dim3(block) , 0, 0, dst, src, maxInd); } }
63476494eea2cb236d583db6530941637b4f9212.cu
#include <bvh_common.h> #include "bvh_math.cuh" __global__ void computeMass_kernel(float * dst, float * mass0, float scale, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float m0 = mass0[ind]; if( m0 < 1e5f ) dst[ind] = m0 * scale; } __global__ void useAllAnchoredVelocity_kernel(float3 * vel, float3 * anchoredVel, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 va = anchoredVel[ind]; vel[ind] = va; } __global__ void useAnchoredVelocity_kernel(float3 * vel, float3 * anchoredVel, uint * anchored, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 va = anchoredVel[ind]; if(anchored[ind] > 0) vel[ind] = va; } __global__ void integrate_kernel(float3 * pos, float3 * vel, float3 * vela, float dt, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 anchoredVel = vela[ind]; vel[ind] = anchoredVel; float3_add_inplace(pos[ind], scale_float3_by(anchoredVel, dt)); } __global__ void updatePosition_kernel(float3 * pos, float3 * pos0, float3 * vel, float3 * anchoredVel, uint * anchor, float dt, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 va = anchoredVel[ind]; if(anchor[ind] > 0) vel[ind] = va; else va = vel[ind]; pos0[ind] = pos[ind]; float3_add_inplace(pos[ind], scale_float3_by(va, dt)); } __global__ void integrate2_kernel(float3 * pos, float3 * vel, float dt, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float3 va = vel[ind]; float3_add_inplace(pos[ind], scale_float3_by(va, dt)); } __global__ void impulseForce_kernel(float3 * force, float3 * deltaVel, float * mass, float dt, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; /* * F = J / dt * J = m * dv */ float m = mass[ind]; if(m > 1e5f) force[ind] = make_float3(0.f, 0.f, 0.f); else force[ind] = scale_float3_by(deltaVel[ind], m / dt); } __global__ void computeEnergy_kernel(float * energy, float * mass, float3 * vel, float defaultNodeMass, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float m = mass[ind]; if(m > 1e5f) m = defaultNodeMass; energy[ind] = float3_length2(vel[ind]) * m; } __global__ void computeLength_kernel(float * energy, float3 * vel, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; energy[ind] = float3_length2(vel[ind]); } __global__ void zeroVelocity_kernel(float3 * vel, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; vel[ind] = make_float3(0.f, 0.f, 0.f); } __global__ void setVelocity_kernel(float3 * deltaVel, float * mass, float x, float y, float z, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; float m = mass[ind]; if(m > 1e5f) deltaVel[ind] = make_float3(0.f, 0.f, 0.f); else deltaVel[ind] = make_float3(x, y, z); } __global__ void copyPosition_kernel(float3 * dst, float3 * src, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; dst[ind] = src[ind]; } namespace masssystem { void computeMass(float * dst, float * mass0, float scale, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); computeMass_kernel<<< grid, block >>>(dst, mass0, scale, maxInd); } void useAnchoredVelocity(float3 * vel, float3 * anchoredVel, uint * anchored, uint 
maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); useAnchoredVelocity_kernel<<< grid, block >>>(vel, anchoredVel, anchored, maxInd); } void useAllAnchoredVelocity(float3 * vel, float3 * anchoredVel, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); useAllAnchoredVelocity_kernel<<< grid, block >>>(vel, anchoredVel, maxInd); } void integrate(float3 * pos, float3 * prePos, float3 * vel, float3 * anchoredVel, uint * anchor, float dt, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); updatePosition_kernel<<< grid, block >>>(pos, prePos, vel, anchoredVel, anchor, dt, maxInd); } void integrateAllAnchored(float3 * pos, float3 * vel, float3 * vela, float dt, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); integrate_kernel<<< grid, block >>>(pos, vel, vela, dt, maxInd); } void integrateSimple(float3 * pos, float3 * vel, float dt, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); integrate2_kernel<<< grid, block >>>(pos, vel, dt, maxInd); } void impulseForce(float3 * force, float3 * deltaVel, float * mass, float dt, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); impulseForce_kernel<<< grid, block >>>(force, deltaVel, mass, dt, maxInd); } void computeEnergy(float * dst, float * mass, float3 * vel, float defaultNodeMass, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); computeEnergy_kernel<<< grid, block >>>(dst, mass, vel, defaultNodeMass, maxInd); } void computeLength(float * dst, float3 * vel, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); computeLength_kernel<<< grid, block >>>(dst, vel, maxInd); } void zeroVelocity(float3 * vel, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); zeroVelocity_kernel<<< grid, block >>>(vel, maxInd); } void setVelocity(float3 * deltaVel, float * mass, float x, float y, float z, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); setVelocity_kernel<<< grid, block >>>(deltaVel, mass, x, y, z, maxInd); } void copyPosition(float3 * dst, float3 * src, uint maxInd) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(maxInd, 512); dim3 grid(nblk, 1, 1); copyPosition_kernel<<< grid, block >>>(dst, src, maxInd); } }
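In the mass-system pair above the systematic difference is the kernel-launch syntax: every kernel<<<grid, block>>>(args) call in the .cu file becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args) in the .hip file. A minimal sketch of that mapping follows, using a made-up kernel and a ceil-division helper standing in for the iDivUp() that the files above pull from bvh_common.h.

#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* dst, const float* src, float s, unsigned maxInd)
{
    unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
    if (ind >= maxInd) return;            // same bounds guard as the kernels above
    dst[ind] = src[ind] * s;
}

// ceil(a / b); stands in for the iDivUp() helper used by the kernels above
static unsigned ceil_div(unsigned a, unsigned b) { return (a + b - 1) / b; }

void launch_scale(float* dst, const float* src, float s, unsigned maxInd)
{
    dim3 block(512, 1, 1);
    dim3 grid(ceil_div(maxInd, 512), 1, 1);
    // CUDA form (the .cu file):
    //   scale_kernel<<< grid, block >>>(dst, src, s, maxInd);
    // HIP form emitted by hipify (the .hip file):
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, dst, src, s, maxInd);
}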
c9900e553abbbdd8f112c8950a33bf581fde39d3.hip
// !!! This is a file automatically generated by hipify!!! // File : YnLayerDeconvolutionalGpu.c // Brief : Implement methods. // DD-MM-YYYY : 02-08-2016 // Author : haittt #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "../include/YnLayerDeconvolutionalGpu.h" #include "../include/YnCudaGpu.h" #include "../include/YnGemmGpu.h" #include "../include/YnBlasGpu.h" #include "../include/YnImageGpu.h" #include "../include/YnUtil.h" } /**************** Define */ /**************** Macro */ /**************** Enum */ /**************** Struct */ /**************** Local variables */ /**************** Global variables */ /**************** Local Implement */ /**************** Implement */ YN_EXTERN_C void YnLayerDeconvolutionalGpuForward(tYnLayer layer, tYnNetworkState state) { int i; float *a; float *b; float *c; int out_h = YnLayerDeconvolutionalOutHeightGet(layer); int out_w = YnLayerDeconvolutionalOutWidthGet(layer); int size = out_h*out_w; int m = layer.size*layer.size*layer.n; int n = layer.h*layer.w; int k = layer.c; YnBlasGpuArrayFillValueSet(layer.outputGpu, layer.outputs * layer.batch, 1, 0); for (i = 0; i < layer.batch; i ++) { a = layer.filtersGpu; b = state.input + i * layer.c * layer.h * layer.w; c = layer.colImageGpu; YnGemmGpu(1, 0, m, n, k, 1, a, m, b, n, 0, c, n); YnImageGpuCol2Image(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.outputGpu + i * layer.n * size); } YnBlasGpuBiasAdd(layer.outputGpu, layer.biasesGpu, layer.batch, layer.n, size); YnActivationGpuOutputArrayCal(layer.outputGpu, layer.batch * layer.n * size, layer.activation); } YN_EXTERN_C void YnLayerDeconvolutionalGpuBackward(tYnLayer layer, tYnNetworkState state) { int m; int n; int k; float *a; float *b; float *c; float alpha = 1. 
/ layer.batch; int out_h = YnLayerDeconvolutionalOutHeightGet(layer); int out_w = YnLayerDeconvolutionalOutWidthGet(layer); int size = out_h * out_w; int i; YnActivationGradientArrayCal(layer.outputGpu, size*layer.n*layer.batch, layer.activation, layer.deltaGpu); YnBlasArrayBackwardBias(layer.biasUpdatesGpu, layer.delta, layer.batch, layer.n, size); if (state.delta) memset(state.delta, 0, layer.batch * layer.h * layer.w * layer.c * sizeof(float)); for (i = 0; i < layer.batch; i ++) { m = layer.c; n = layer.size*layer.size*layer.n; k = layer.h*layer.w; a = state.input + i*m*n; b = layer.colImageGpu; c = layer.filterUpdatesGpu; YnImageGpuImage2Col(layer.deltaGpu + i * layer.n * size, layer.n, out_h, out_w, layer.size, layer.stride, 0, b); YnGemmGpu(0, 1, m, n, k, alpha, a, k ,b ,k ,1 ,c , n); if (state.delta) { m = layer.c; n = layer.h*layer.w; k = layer.size*layer.size*layer.n; a = layer.filtersGpu; b = layer.colImageGpu; c = state.delta + i * n * m; YnGemm(0, 0, m, n, k, 1, a, k, b ,n ,1 ,c , n); } } } YN_EXTERN_C void YnLayerDeconvolutionalGpuPull(tYnLayer layer) { YnCudaArrayPullFromGpu(layer.filtersGpu, layer.filters, layer.c*layer.n*layer.size*layer.size); YnCudaArrayPullFromGpu(layer.biasesGpu, layer.biases, layer.n); YnCudaArrayPullFromGpu(layer.filterUpdatesGpu, layer.filterUpdates, layer.c * layer.n * layer.size * layer.size); YnCudaArrayPullFromGpu(layer.biasUpdatesGpu, layer.biasUpdates, layer.n); } YN_EXTERN_C void YnLayerDeconvolutionalGpuPush(tYnLayer layer) { YnCudaArrayPushToGpu(layer.filtersGpu, layer.filters, layer.c * layer.n * layer.size * layer.size); YnCudaArrayPushToGpu(layer.biasesGpu, layer.biases, layer.n); YnCudaArrayPushToGpu(layer.filterUpdatesGpu, layer.filterUpdates, layer.c * layer.n * layer.size * layer.size); YnCudaArrayPushToGpu(layer.biasUpdatesGpu, layer.biasUpdates, layer.n); } YN_EXTERN_C void YnLayerDeconvolutionalGpuUpdate(tYnLayer layer, int batch, float learning_rate, float momentum, float decay) { int size = layer.size*layer.size*layer.c*layer.n; YnBlasGpuArrayAxpyValueSet(layer.biasesGpu, layer.biasUpdatesGpu, layer.n, 1, 1, learning_rate); YnBlasGpuArrayScaleValueSet(layer.biasUpdatesGpu, layer.n, 1, momentum); YnBlasGpuArrayAxpyValueSet(layer.filterUpdatesGpu, layer.filtersGpu, size, 1, 1, - decay); YnBlasGpuArrayAxpyValueSet(layer.filtersGpu, layer.filterUpdatesGpu, size, 1, 1, learning_rate); YnBlasGpuArrayScaleValueSet(layer.filterUpdatesGpu, size, 1, momentum); }
c9900e553abbbdd8f112c8950a33bf581fde39d3.cu
// File : YnLayerDeconvolutionalGpu.c // Brief : Implement methods. // DD-MM-YYYY : 02-08-2016 // Author : haittt #include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "../include/YnLayerDeconvolutionalGpu.h" #include "../include/YnCudaGpu.h" #include "../include/YnGemmGpu.h" #include "../include/YnBlasGpu.h" #include "../include/YnImageGpu.h" #include "../include/YnUtil.h" } /**************** Define */ /**************** Macro */ /**************** Enum */ /**************** Struct */ /**************** Local variables */ /**************** Global variables */ /**************** Local Implement */ /**************** Implement */ YN_EXTERN_C void YnLayerDeconvolutionalGpuForward(tYnLayer layer, tYnNetworkState state) { int i; float *a; float *b; float *c; int out_h = YnLayerDeconvolutionalOutHeightGet(layer); int out_w = YnLayerDeconvolutionalOutWidthGet(layer); int size = out_h*out_w; int m = layer.size*layer.size*layer.n; int n = layer.h*layer.w; int k = layer.c; YnBlasGpuArrayFillValueSet(layer.outputGpu, layer.outputs * layer.batch, 1, 0); for (i = 0; i < layer.batch; i ++) { a = layer.filtersGpu; b = state.input + i * layer.c * layer.h * layer.w; c = layer.colImageGpu; YnGemmGpu(1, 0, m, n, k, 1, a, m, b, n, 0, c, n); YnImageGpuCol2Image(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.outputGpu + i * layer.n * size); } YnBlasGpuBiasAdd(layer.outputGpu, layer.biasesGpu, layer.batch, layer.n, size); YnActivationGpuOutputArrayCal(layer.outputGpu, layer.batch * layer.n * size, layer.activation); } YN_EXTERN_C void YnLayerDeconvolutionalGpuBackward(tYnLayer layer, tYnNetworkState state) { int m; int n; int k; float *a; float *b; float *c; float alpha = 1. / layer.batch; int out_h = YnLayerDeconvolutionalOutHeightGet(layer); int out_w = YnLayerDeconvolutionalOutWidthGet(layer); int size = out_h * out_w; int i; YnActivationGradientArrayCal(layer.outputGpu, size*layer.n*layer.batch, layer.activation, layer.deltaGpu); YnBlasArrayBackwardBias(layer.biasUpdatesGpu, layer.delta, layer.batch, layer.n, size); if (state.delta) memset(state.delta, 0, layer.batch * layer.h * layer.w * layer.c * sizeof(float)); for (i = 0; i < layer.batch; i ++) { m = layer.c; n = layer.size*layer.size*layer.n; k = layer.h*layer.w; a = state.input + i*m*n; b = layer.colImageGpu; c = layer.filterUpdatesGpu; YnImageGpuImage2Col(layer.deltaGpu + i * layer.n * size, layer.n, out_h, out_w, layer.size, layer.stride, 0, b); YnGemmGpu(0, 1, m, n, k, alpha, a, k ,b ,k ,1 ,c , n); if (state.delta) { m = layer.c; n = layer.h*layer.w; k = layer.size*layer.size*layer.n; a = layer.filtersGpu; b = layer.colImageGpu; c = state.delta + i * n * m; YnGemm(0, 0, m, n, k, 1, a, k, b ,n ,1 ,c , n); } } } YN_EXTERN_C void YnLayerDeconvolutionalGpuPull(tYnLayer layer) { YnCudaArrayPullFromGpu(layer.filtersGpu, layer.filters, layer.c*layer.n*layer.size*layer.size); YnCudaArrayPullFromGpu(layer.biasesGpu, layer.biases, layer.n); YnCudaArrayPullFromGpu(layer.filterUpdatesGpu, layer.filterUpdates, layer.c * layer.n * layer.size * layer.size); YnCudaArrayPullFromGpu(layer.biasUpdatesGpu, layer.biasUpdates, layer.n); } YN_EXTERN_C void YnLayerDeconvolutionalGpuPush(tYnLayer layer) { YnCudaArrayPushToGpu(layer.filtersGpu, layer.filters, layer.c * layer.n * layer.size * layer.size); YnCudaArrayPushToGpu(layer.biasesGpu, layer.biases, layer.n); YnCudaArrayPushToGpu(layer.filterUpdatesGpu, layer.filterUpdates, layer.c * layer.n * layer.size * layer.size); YnCudaArrayPushToGpu(layer.biasUpdatesGpu, 
layer.biasUpdates, layer.n); } YN_EXTERN_C void YnLayerDeconvolutionalGpuUpdate(tYnLayer layer, int batch, float learning_rate, float momentum, float decay) { int size = layer.size*layer.size*layer.c*layer.n; YnBlasGpuArrayAxpyValueSet(layer.biasesGpu, layer.biasUpdatesGpu, layer.n, 1, 1, learning_rate); YnBlasGpuArrayScaleValueSet(layer.biasUpdatesGpu, layer.n, 1, momentum); YnBlasGpuArrayAxpyValueSet(layer.filterUpdatesGpu, layer.filtersGpu, size, 1, 1, - decay); YnBlasGpuArrayAxpyValueSet(layer.filtersGpu, layer.filterUpdatesGpu, size, 1, 1, learning_rate); YnBlasGpuArrayScaleValueSet(layer.filterUpdatesGpu, size, 1, momentum); }
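Beyond launch syntax, the deconvolutional-layer pair above shows the library-header mapping hipify applies: cuda_runtime.h, curand.h and cublas_v2.h become hip/hip_runtime.h, hiprand/hiprand.h and rocblas.h. As a hedged illustration of the cuRAND-to-hipRAND side of that mapping (the part closest to the normal-sample generation discussed earlier in this document), the sketch below uses the hipRAND host API, whose names mirror cuRAND one for one; it is an assumption-laden toy and not code from the dataset.

#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>

int main() {
    const size_t n = 1 << 20;              // even count, since the normal generators emit pairs
    float* d_samples = nullptr;
    hipMalloc((void**)&d_samples, n * sizeof(float));

    hiprandGenerator_t gen;                                    // was: curandGenerator_t
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);  // was: curandCreateGenerator / CURAND_RNG_PSEUDO_DEFAULT
    hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);         // was: curandSetPseudoRandomGeneratorSeed
    hiprandGenerateNormal(gen, d_samples, n, 0.0f, 1.0f);      // was: curandGenerateNormal (mean 0, stddev 1)

    hiprandDestroyGenerator(gen);                              // was: curandDestroyGenerator
    hipFree(d_samples);
    return 0;
}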
resize_op.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/operators/resize_op.h" namespace caffe2 { namespace { __global__ void NearestNeighborKernel( const int size, const int num_channels, const int input_height, const int input_width, const int output_height, const int output_width, const float height_scale, const float width_scale, const float* X, float* Y) { CUDA_1D_KERNEL_LOOP(index, size) { int indexTemp = index; const int w = indexTemp % output_width; indexTemp /= output_width; const int h = indexTemp % output_height; indexTemp /= output_height; const int c = indexTemp % num_channels; indexTemp /= num_channels; const int n = indexTemp; const int in_y = fminf(h / height_scale, input_height - 1); const int in_x = fminf(w / width_scale, input_width - 1); Y[index] = X[((n * num_channels + c) * input_height + in_y) * input_width + in_x]; } } __global__ void NearestNeighborGradientKernel( const int size, const int num_channels, const int input_height, const int input_width, const int output_height, const int output_width, const float height_scale, const float width_scale, const float* dY, float* dX) { CUDA_1D_KERNEL_LOOP(index, size) { int indexTemp = index; const int x = indexTemp % input_width; indexTemp /= input_width; const int y = indexTemp % input_height; indexTemp /= input_height; const int c = indexTemp % num_channels; indexTemp /= num_channels; const int n = indexTemp; const int out_y = fminf(y / height_scale, output_height - 1); const int out_x = fminf(x / width_scale, output_width - 1); const int out_index = ((n * num_channels + c) * output_height + out_y) * output_width + out_x; #if __CUDA_ARCH__ >= 350 atomicAdd(dX + out_index, __ldg(dY + index)); #else atomicAdd(dX + out_index, *(dY + index)); #endif } } } // namespace template <> bool ResizeNearestOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); const auto inputDims = X.sizes(); CAFFE_ENFORCE_EQ(4, inputDims.size()); const int batch_size = X.dim32(0), num_channels = X.dim32(1), input_height = X.dim32(2), input_width = X.dim32(3); if (InputSize() == 2) { const auto& scales = Input(1); CAFFE_ENFORCE_EQ(scales.dim(), 1); CAFFE_ENFORCE_EQ(scales.size(), 2); float scales_data[2]; context_.CopyToCPU<float>(2, scales.data<float>(), scales_data); height_scale_ = scales_data[0]; width_scale_ = scales_data[1]; } int output_width = input_width * width_scale_; int output_height = input_height * height_scale_; auto* Y = Output(0, {batch_size, num_channels, output_height, output_width}, at::dtype<float>()); const auto size = Y->size(); hipLaunchKernelGGL(( NearestNeighborKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, num_channels, input_height, input_width, output_height, output_width, height_scale_, width_scale_, X.data<float>(), Y->template mutable_data<float>()); return true; } template <> bool ResizeNearestGradientOp<float, CUDAContext>::RunOnDevice() { const auto& dY = Input(0); const auto& X = Input(1); const auto inputDims = dY.sizes(); CAFFE_ENFORCE_EQ(4, inputDims.size()); const int batch_size = dY.dim32(0), num_channels = dY.dim32(1), input_height = dY.dim32(2), input_width = dY.dim32(3); int output_height = X.dim32(2); int output_width = X.dim32(3); if (InputSize() == 3) { const auto& scales = Input(2); CAFFE_ENFORCE_EQ(scales.dim(), 1); CAFFE_ENFORCE_EQ(scales.size(), 2); float scales_data[2]; context_.CopyToCPU<float>(2, 
scales.data<float>(), scales_data); height_scale_ = scales_data[0]; width_scale_ = scales_data[1]; } auto* dX = Output(0, {batch_size, num_channels, output_height, output_width}, at::dtype<float>()); math::Set<float, CUDAContext>( dX->size(), 0.0f, dX->template mutable_data<float>(), &context_); const auto size = dY.size(); hipLaunchKernelGGL(( NearestNeighborGradientKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, num_channels, input_height, input_width, output_height, output_width, height_scale_, width_scale_, dY.data<float>(), dX->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(ResizeNearest, ResizeNearestOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( ResizeNearestGradient, ResizeNearestGradientOp<float, CUDAContext>); } // namespace caffe2
resize_op.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/operators/resize_op.h" namespace caffe2 { namespace { __global__ void NearestNeighborKernel( const int size, const int num_channels, const int input_height, const int input_width, const int output_height, const int output_width, const float height_scale, const float width_scale, const float* X, float* Y) { CUDA_1D_KERNEL_LOOP(index, size) { int indexTemp = index; const int w = indexTemp % output_width; indexTemp /= output_width; const int h = indexTemp % output_height; indexTemp /= output_height; const int c = indexTemp % num_channels; indexTemp /= num_channels; const int n = indexTemp; const int in_y = fminf(h / height_scale, input_height - 1); const int in_x = fminf(w / width_scale, input_width - 1); Y[index] = X[((n * num_channels + c) * input_height + in_y) * input_width + in_x]; } } __global__ void NearestNeighborGradientKernel( const int size, const int num_channels, const int input_height, const int input_width, const int output_height, const int output_width, const float height_scale, const float width_scale, const float* dY, float* dX) { CUDA_1D_KERNEL_LOOP(index, size) { int indexTemp = index; const int x = indexTemp % input_width; indexTemp /= input_width; const int y = indexTemp % input_height; indexTemp /= input_height; const int c = indexTemp % num_channels; indexTemp /= num_channels; const int n = indexTemp; const int out_y = fminf(y / height_scale, output_height - 1); const int out_x = fminf(x / width_scale, output_width - 1); const int out_index = ((n * num_channels + c) * output_height + out_y) * output_width + out_x; #if __CUDA_ARCH__ >= 350 atomicAdd(dX + out_index, __ldg(dY + index)); #else atomicAdd(dX + out_index, *(dY + index)); #endif } } } // namespace template <> bool ResizeNearestOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); const auto inputDims = X.sizes(); CAFFE_ENFORCE_EQ(4, inputDims.size()); const int batch_size = X.dim32(0), num_channels = X.dim32(1), input_height = X.dim32(2), input_width = X.dim32(3); if (InputSize() == 2) { const auto& scales = Input(1); CAFFE_ENFORCE_EQ(scales.dim(), 1); CAFFE_ENFORCE_EQ(scales.size(), 2); float scales_data[2]; context_.CopyToCPU<float>(2, scales.data<float>(), scales_data); height_scale_ = scales_data[0]; width_scale_ = scales_data[1]; } int output_width = input_width * width_scale_; int output_height = input_height * height_scale_; auto* Y = Output(0, {batch_size, num_channels, output_height, output_width}, at::dtype<float>()); const auto size = Y->size(); NearestNeighborKernel<<< CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, num_channels, input_height, input_width, output_height, output_width, height_scale_, width_scale_, X.data<float>(), Y->template mutable_data<float>()); return true; } template <> bool ResizeNearestGradientOp<float, CUDAContext>::RunOnDevice() { const auto& dY = Input(0); const auto& X = Input(1); const auto inputDims = dY.sizes(); CAFFE_ENFORCE_EQ(4, inputDims.size()); const int batch_size = dY.dim32(0), num_channels = dY.dim32(1), input_height = dY.dim32(2), input_width = dY.dim32(3); int output_height = X.dim32(2); int output_width = X.dim32(3); if (InputSize() == 3) { const auto& scales = Input(2); CAFFE_ENFORCE_EQ(scales.dim(), 1); CAFFE_ENFORCE_EQ(scales.size(), 2); float scales_data[2]; context_.CopyToCPU<float>(2, scales.data<float>(), scales_data); height_scale_ = scales_data[0]; width_scale_ = scales_data[1]; } auto* dX = Output(0, {batch_size, 
num_channels, output_height, output_width}, at::dtype<float>()); math::Set<float, CUDAContext>( dX->size(), 0.0f, dX->template mutable_data<float>(), &context_); const auto size = dY.size(); NearestNeighborGradientKernel<<< CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, num_channels, input_height, input_width, output_height, output_width, height_scale_, width_scale_, dY.data<float>(), dX->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(ResizeNearest, ResizeNearestOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( ResizeNearestGradient, ResizeNearestGradientOp<float, CUDAContext>); } // namespace caffe2
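The resize_op pair above drives its kernels through Caffe2's CUDA_1D_KERNEL_LOOP and CAFFE_GET_BLOCKS helpers, which hipify leaves alone; only the launch syntax and the stream argument change. The toy kernel below writes out the grid-stride loop that the macro expands to, applied to the same nearest-neighbour NCHW index arithmetic; it is a standalone sketch, not the Caffe2 operator.

#include <hip/hip_runtime.h>

__global__ void nearest_neighbor_toy(int size, int num_channels,
                                     int input_height, int input_width,
                                     int output_height, int output_width,
                                     float height_scale, float width_scale,
                                     const float* X, float* Y)
{
    // Explicit form of the grid-stride loop that CUDA_1D_KERNEL_LOOP(index, size) expands to.
    for (int index = blockIdx.x * blockDim.x + threadIdx.x;
         index < size;
         index += blockDim.x * gridDim.x) {
        int t = index;
        const int w = t % output_width;  t /= output_width;
        const int h = t % output_height; t /= output_height;
        const int c = t % num_channels;  t /= num_channels;
        const int n = t;
        // Clamp the source coordinates, as the operator above does with fminf().
        const int in_y = fminf(h / height_scale, input_height - 1);
        const int in_x = fminf(w / width_scale, input_width - 1);
        Y[index] = X[((n * num_channels + c) * input_height + in_y) * input_width + in_x];
    }
}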
76e23ac0ed9e6ea86068335f973bdbd5d7e19639.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/native/TensorIterator.h> #include <aten/src/ATen/TensorUtils.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/native/hip/Loops.cuh> constexpr float EPSILON = 1e-12; namespace { using namespace at; void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { at::TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad) .add_input(input) .add_input(target) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { at::native::gpu_kernel(iter, [] GPU_LAMBDA ( scalar_t grad_val, scalar_t input_val, scalar_t target_val ) -> scalar_t { const scalar_t one = 1; const scalar_t epsilon = EPSILON; scalar_t grad_input_denominator = max( (one - input_val) * input_val, epsilon ); return grad_val * (input_val - target_val) / grad_input_denominator; } ); }); } } // namespace namespace at { namespace native { Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) { auto grad_input = at::empty_like(input); if (!log_target) { TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(target) .add_input(grad) .build(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() { scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0); gpu_kernel(iter, [inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) { return (target_val > 0) ? 
scalar_t(-target_val * grad_val * inv) : scalar_t(0.0); }); }); } else { grad_input = -at::exp(target) * grad; if (reduction == at::Reduction::Mean) { grad_input /= input.numel(); } } return grad_input; } Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor loss = at::empty_like(input); return at::native::binary_cross_entropy_out_cuda( input, target, weight, reduction, loss); } Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor loss_squeezed = at::squeeze(loss); TensorIterator iter = TensorIteratorConfig() .add_output(loss_squeezed) .add_owned_input(at::squeeze(input)) .add_owned_input(at::squeeze(target)) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { const scalar_t zero = 0; const scalar_t one = 1; const scalar_t neg_100 = -100; CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); scalar_t log_input_val = ::log(input_val); scalar_t log_1_minus_input_val = ::log(one - input_val); log_input_val = ::max(log_input_val, neg_100); log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100); return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); } ); }); if (weight.defined()) { loss.mul_(weight); } if (reduction != at::Reduction::None) { Tensor loss_reduced; if (reduction == at::Reduction::Mean) { loss_reduced = loss.mean(); } else if (reduction == at::Reduction::Sum) { loss_reduced = loss.sum(); } loss.resize_as_(loss_reduced).copy_(loss_reduced); } return loss; } Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor grad_input = at::empty_like(input); return at::native::binary_cross_entropy_backward_out_cuda( grad, input, target, weight, reduction, grad_input); } Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor grad_expand = grad.expand_as(input); binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); if (weight.defined()) { grad_input.mul_(weight); } if (reduction == at::Reduction::Mean) { grad_input.div_(input.numel()); } return grad_input; } // ----------------------------------- // nll_loss // ----------------------------------- namespace { const int NLL_LOSS_THREADS = 32; #define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, 
NAME, ...) \ [&] { \ at::ScalarType _it = TYPE; \ RECORD_KERNEL_FUNCTION_DTYPE(NAME, _it) \ switch (_it) { \ AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Byte, uint8_t, index_t, __VA_ARGS__) \ AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Long, int64_t, index_t, __VA_ARGS__)\ default: \ AT_ERROR(#NAME, " not implemented for '", toString(_it), "'"); \ } \ }() template <typename scalar_t, typename index_t> __global__ void nll_loss_forward_no_reduce_cuda_kernel( int64_t batch_size, PackedTensorAccessor64<scalar_t, 2> input, index_t* target, scalar_t* output, scalar_t* weights, int n_classes, int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) { int cur_target = target[index]; if (cur_target == ignore_index) { output[index] = static_cast<scalar_t>(0); continue; } CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); auto cur_weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1); output[index] = -cur_weight * input[index][cur_target]; } } template <typename scalar_t, typename index_t> __global__ void nll_loss_forward_reduce_cuda_kernel_1d( scalar_t* output, scalar_t* total_weight, scalar_t* input, index_t* target, scalar_t* weights, bool size_average, int n_classes, int64_t ignore_index) { CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); int t = static_cast<int>(*target); if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); scalar_t cur_weight = weights != nullptr ? weights[t] : static_cast<scalar_t>(1); *output = -cur_weight * input[t]; *total_weight = cur_weight; if (size_average && *total_weight > 0) { *output /= *total_weight; } } } template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void nll_loss_forward_reduce_cuda_kernel_2d( scalar_t* output, scalar_t* total_weight, scalar_t* input, index_t* target, scalar_t* weights, bool size_average, int nframe, int ndim, int n_classes, int64_t ignore_index) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) __shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS], acc_weight[NLL_LOSS_THREADS]; sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0); acc_weight[threadIdx.x] = static_cast<accscalar_t>(0); for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { int t = target[i]; if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); scalar_t cur_weight = weights != nullptr ? weights[t] : static_cast<scalar_t>(1); sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight; acc_weight[threadIdx.x] += cur_weight; } } __syncthreads(); if (threadIdx.x == 0) { accscalar_t output_acc = 0; accscalar_t total_weight_acc = 0; for (int i = 0; i < NLL_LOSS_THREADS; ++i) { output_acc += sh_inputs[i]; total_weight_acc += acc_weight[i]; } *total_weight = static_cast<scalar_t>(total_weight_acc); if (size_average && nframe == 0) { // Mean reduction on empty tensors produces NaN *output = std::numeric_limits<scalar_t>::quiet_NaN(); } else if (size_average && total_weight_acc != 0) { *output = static_cast<scalar_t>(output_acc / total_weight_acc); } else { *output = static_cast<scalar_t>(output_acc); } } } void nll_loss_forward_out_cuda_template( const Tensor& output, const Tensor& total_weight, const Tensor& input_, const Tensor& target_, const Tensor& weight, int64_t reduction, int64_t ignore_index) { auto input = *input_.expect_contiguous(); auto target = *target_.expect_contiguous(); int64_t n_classes = input.size(-1); int64_t n_dims = input.dim(); int64_t batch_size = n_dims == 1 ? 
1 : input.size(0); auto weight_ = weight.defined() ? weight.contiguous() : weight; if (reduction == Reduction::None && n_dims == 2) { output.resize_({batch_size}); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with // 0 blocks. return; } AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_no_reduce_cuda_kernel", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_no_reduce_cuda_kernel_index", [&] { hipLaunchKernelGGL(( nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t>) , dim3(at::cuda::detail::GET_BLOCKS(batch_size)), dim3(at::cuda::detail::CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), batch_size, input.packed_accessor64<scalar_t, 2>(), target.data_ptr<index_t>(), output.data_ptr<scalar_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return; } output.resize_({}); total_weight.resize_({}); if (n_dims == 1) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_1d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_1d_index", [&] { hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t>) , dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, reduction == at::Reduction::Mean, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); } else if (n_dims == 2) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_2d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_2d_index", [&] { using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>; hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t>) , dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, reduction == at::Reduction::Mean, input.size(0), input.size(1), n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); } } template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_no_reduce_cuda_kernel( int batch_size, index_t *target, PackedTensorAccessor64<scalar_t, 1> grad_output, PackedTensorAccessor64<scalar_t, 2> grad_input, scalar_t *weights, int n_classes, int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) { int cur_target = target[index]; if (cur_target == ignore_index) { continue; } CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); scalar_t weight = weights != nullptr ? 
weights[cur_target] : static_cast<scalar_t>(1); grad_input[index][cur_target] = -weight * grad_output[index]; } }; template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_reduce_cuda_kernel_1d( scalar_t *grad_input, scalar_t *grad_output, scalar_t *weights, index_t *target, scalar_t *total_weight, bool size_average, int n_classes, int64_t ignore_index ) { if (*total_weight <= 0) { return; } scalar_t norm = size_average ? (static_cast<scalar_t>(1) / *total_weight) : static_cast<scalar_t>(1); int t = static_cast<int>(*target); if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); grad_input[t] = -(weights != nullptr ? weights[t] : static_cast<scalar_t>(1)) * norm * grad_output[0]; } }; template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_reduce_cuda_kernel_2d( scalar_t* grad_input, scalar_t* grad_output, index_t* target, scalar_t* weights, scalar_t* total_weight, bool size_average, int nframe, int ndim, int n_classes, int64_t ignore_index) { if (*total_weight <= 0) { return; } scalar_t norm = size_average ? (static_cast<scalar_t>(1) / *total_weight) : static_cast<scalar_t>(1); for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { int t = target[i]; if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); grad_input[i * ndim + t] = -(weights != nullptr ? weights[t] : static_cast<scalar_t>(1)) * norm * grad_output[0]; } } }; void nll_loss_backward_out_cuda_template( const Tensor& grad_input_, const Tensor& grad_output_, const Tensor& input_, const Tensor& target_, const Tensor& total_weight, const Tensor& weight, int64_t reduction, int64_t ignore_index) { auto target = *target_.expect_contiguous(); auto input = *input_.expect_contiguous(); auto grad_input = *grad_input_.expect_contiguous(); auto grad_output = *grad_output_.expect_contiguous(); int64_t n_dims = input.dim(); int64_t n_classes = input.size(-1); int64_t batch_size = n_dims == 1 ? 1 : input.size(0); auto weight_ = weight.defined() ? weight.contiguous() : weight; if (reduction == at::Reduction::None && n_dims == 2) { check_dim_size(grad_output, 1, 0, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_no_reduce_cuda_kernel", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_no_reduce_cuda_kernel_index", [&] { hipLaunchKernelGGL(( nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t>) , dim3(at::cuda::detail::GET_BLOCKS(batch_size)), dim3(at::cuda::detail::CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), batch_size, target.data_ptr<index_t>(), grad_output.packed_accessor64<scalar_t, 1>(), grad_input.packed_accessor64<scalar_t, 2>(), weight.defined() ? 
weight_.data_ptr<scalar_t>() : nullptr, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return; } TORCH_CHECK(grad_output.numel() == 1); if (n_dims == 1) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_1d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_1d_index", [&] { hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t>) , dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, target.data_ptr<index_t>(), total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_2d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_2d_index", [&] { hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t>) , dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, input.size(0), input.size(1), n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); } } #undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES } // namespace TORCH_IMPL_FUNC(nll_loss_forward_out_cuda) (const Tensor& self, const Tensor& target, const OptionalTensorRef weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& output, const Tensor& total_weight) { const Tensor& weight = weight_opt.getTensorRef(); nll_loss_forward_out_cuda_template( output, total_weight, self, target, weight, reduction, ignore_index); } TORCH_IMPL_FUNC(nll_loss_backward_out_cuda) (const Tensor& grad_output, const Tensor& self, const Tensor& target, OptionalTensorRef weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight, const Tensor& grad_input) { const Tensor& weight = weight_opt.getTensorRef(); grad_input.zero_(); nll_loss_backward_out_cuda_template( grad_input, grad_output, self, target, total_weight, weight, reduction, ignore_index); } }} // namespace at::native
76e23ac0ed9e6ea86068335f973bdbd5d7e19639.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/native/TensorIterator.h> #include <aten/src/ATen/TensorUtils.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/native/cuda/Loops.cuh> constexpr float EPSILON = 1e-12; namespace { using namespace at; void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { at::TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad) .add_input(input) .add_input(target) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { at::native::gpu_kernel(iter, [] GPU_LAMBDA ( scalar_t grad_val, scalar_t input_val, scalar_t target_val ) -> scalar_t { const scalar_t one = 1; const scalar_t epsilon = EPSILON; scalar_t grad_input_denominator = max( (one - input_val) * input_val, epsilon ); return grad_val * (input_val - target_val) / grad_input_denominator; } ); }); } } // namespace namespace at { namespace native { Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) { auto grad_input = at::empty_like(input); if (!log_target) { TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(target) .add_input(grad) .build(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() { scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0); gpu_kernel(iter, [inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) { return (target_val > 0) ? 
scalar_t(-target_val * grad_val * inv) : scalar_t(0.0); }); }); } else { grad_input = -at::exp(target) * grad; if (reduction == at::Reduction::Mean) { grad_input /= input.numel(); } } return grad_input; } Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor loss = at::empty_like(input); return at::native::binary_cross_entropy_out_cuda( input, target, weight, reduction, loss); } Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor loss_squeezed = at::squeeze(loss); TensorIterator iter = TensorIteratorConfig() .add_output(loss_squeezed) .add_owned_input(at::squeeze(input)) .add_owned_input(at::squeeze(target)) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { const scalar_t zero = 0; const scalar_t one = 1; const scalar_t neg_100 = -100; CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); scalar_t log_input_val = std::log(input_val); scalar_t log_1_minus_input_val = std::log(one - input_val); log_input_val = std::max(log_input_val, neg_100); log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100); return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); } ); }); if (weight.defined()) { loss.mul_(weight); } if (reduction != at::Reduction::None) { Tensor loss_reduced; if (reduction == at::Reduction::Mean) { loss_reduced = loss.mean(); } else if (reduction == at::Reduction::Sum) { loss_reduced = loss.sum(); } loss.resize_as_(loss_reduced).copy_(loss_reduced); } return loss; } Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor grad_input = at::empty_like(input); return at::native::binary_cross_entropy_backward_out_cuda( grad, input, target, weight, reduction, grad_input); } Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; Tensor grad_expand = grad.expand_as(input); binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); if (weight.defined()) { grad_input.mul_(weight); } if (reduction == at::Reduction::Mean) { grad_input.div_(input.numel()); } return grad_input; } // ----------------------------------- // nll_loss // ----------------------------------- namespace { const int NLL_LOSS_THREADS = 32; #define 
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \ [&] { \ at::ScalarType _it = TYPE; \ RECORD_KERNEL_FUNCTION_DTYPE(NAME, _it) \ switch (_it) { \ AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Byte, uint8_t, index_t, __VA_ARGS__) \ AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Long, int64_t, index_t, __VA_ARGS__)\ default: \ AT_ERROR(#NAME, " not implemented for '", toString(_it), "'"); \ } \ }() template <typename scalar_t, typename index_t> __global__ void nll_loss_forward_no_reduce_cuda_kernel( int64_t batch_size, PackedTensorAccessor64<scalar_t, 2> input, index_t* target, scalar_t* output, scalar_t* weights, int n_classes, int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) { int cur_target = target[index]; if (cur_target == ignore_index) { output[index] = static_cast<scalar_t>(0); continue; } CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); auto cur_weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1); output[index] = -cur_weight * input[index][cur_target]; } } template <typename scalar_t, typename index_t> __global__ void nll_loss_forward_reduce_cuda_kernel_1d( scalar_t* output, scalar_t* total_weight, scalar_t* input, index_t* target, scalar_t* weights, bool size_average, int n_classes, int64_t ignore_index) { CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); int t = static_cast<int>(*target); if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); scalar_t cur_weight = weights != nullptr ? weights[t] : static_cast<scalar_t>(1); *output = -cur_weight * input[t]; *total_weight = cur_weight; if (size_average && *total_weight > 0) { *output /= *total_weight; } } } template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void nll_loss_forward_reduce_cuda_kernel_2d( scalar_t* output, scalar_t* total_weight, scalar_t* input, index_t* target, scalar_t* weights, bool size_average, int nframe, int ndim, int n_classes, int64_t ignore_index) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) __shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS], acc_weight[NLL_LOSS_THREADS]; sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0); acc_weight[threadIdx.x] = static_cast<accscalar_t>(0); for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { int t = target[i]; if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); scalar_t cur_weight = weights != nullptr ? 
weights[t] : static_cast<scalar_t>(1); sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight; acc_weight[threadIdx.x] += cur_weight; } } __syncthreads(); if (threadIdx.x == 0) { accscalar_t output_acc = 0; accscalar_t total_weight_acc = 0; for (int i = 0; i < NLL_LOSS_THREADS; ++i) { output_acc += sh_inputs[i]; total_weight_acc += acc_weight[i]; } *total_weight = static_cast<scalar_t>(total_weight_acc); if (size_average && nframe == 0) { // Mean reduction on empty tensors produces NaN *output = std::numeric_limits<scalar_t>::quiet_NaN(); } else if (size_average && total_weight_acc != 0) { *output = static_cast<scalar_t>(output_acc / total_weight_acc); } else { *output = static_cast<scalar_t>(output_acc); } } } void nll_loss_forward_out_cuda_template( const Tensor& output, const Tensor& total_weight, const Tensor& input_, const Tensor& target_, const Tensor& weight, int64_t reduction, int64_t ignore_index) { auto input = *input_.expect_contiguous(); auto target = *target_.expect_contiguous(); int64_t n_classes = input.size(-1); int64_t n_dims = input.dim(); int64_t batch_size = n_dims == 1 ? 1 : input.size(0); auto weight_ = weight.defined() ? weight.contiguous() : weight; if (reduction == Reduction::None && n_dims == 2) { output.resize_({batch_size}); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with // 0 blocks. return; } AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_no_reduce_cuda_kernel", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_no_reduce_cuda_kernel_index", [&] { nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t> <<<at::cuda::detail::GET_BLOCKS(batch_size), at::cuda::detail::CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( batch_size, input.packed_accessor64<scalar_t, 2>(), target.data_ptr<index_t>(), output.data_ptr<scalar_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return; } output.resize_({}); total_weight.resize_({}); if (n_dims == 1) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_1d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_1d_index", [&] { nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t> <<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr, reduction == at::Reduction::Mean, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); } else if (n_dims == 2) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_2d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_forward_reduce_cuda_kernel_2d_index", [&] { using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>; nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t> <<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight_.defined() ? 
weight_.data_ptr<scalar_t>() : nullptr, reduction == at::Reduction::Mean, input.size(0), input.size(1), n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); } } template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_no_reduce_cuda_kernel( int batch_size, index_t *target, PackedTensorAccessor64<scalar_t, 1> grad_output, PackedTensorAccessor64<scalar_t, 2> grad_input, scalar_t *weights, int n_classes, int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) { int cur_target = target[index]; if (cur_target == ignore_index) { continue; } CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1); grad_input[index][cur_target] = -weight * grad_output[index]; } }; template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_reduce_cuda_kernel_1d( scalar_t *grad_input, scalar_t *grad_output, scalar_t *weights, index_t *target, scalar_t *total_weight, bool size_average, int n_classes, int64_t ignore_index ) { if (*total_weight <= 0) { return; } scalar_t norm = size_average ? (static_cast<scalar_t>(1) / *total_weight) : static_cast<scalar_t>(1); int t = static_cast<int>(*target); if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); grad_input[t] = -(weights != nullptr ? weights[t] : static_cast<scalar_t>(1)) * norm * grad_output[0]; } }; template <typename scalar_t, typename index_t> __global__ void nll_loss_backward_reduce_cuda_kernel_2d( scalar_t* grad_input, scalar_t* grad_output, index_t* target, scalar_t* weights, scalar_t* total_weight, bool size_average, int nframe, int ndim, int n_classes, int64_t ignore_index) { if (*total_weight <= 0) { return; } scalar_t norm = size_average ? (static_cast<scalar_t>(1) / *total_weight) : static_cast<scalar_t>(1); for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { int t = target[i]; if (t != static_cast<int>(ignore_index)) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); grad_input[i * ndim + t] = -(weights != nullptr ? weights[t] : static_cast<scalar_t>(1)) * norm * grad_output[0]; } } }; void nll_loss_backward_out_cuda_template( const Tensor& grad_input_, const Tensor& grad_output_, const Tensor& input_, const Tensor& target_, const Tensor& total_weight, const Tensor& weight, int64_t reduction, int64_t ignore_index) { auto target = *target_.expect_contiguous(); auto input = *input_.expect_contiguous(); auto grad_input = *grad_input_.expect_contiguous(); auto grad_output = *grad_output_.expect_contiguous(); int64_t n_dims = input.dim(); int64_t n_classes = input.size(-1); int64_t batch_size = n_dims == 1 ? 1 : input.size(0); auto weight_ = weight.defined() ? weight.contiguous() : weight; if (reduction == at::Reduction::None && n_dims == 2) { check_dim_size(grad_output, 1, 0, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. 
return; } AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_no_reduce_cuda_kernel", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_no_reduce_cuda_kernel_index", [&] { nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t> <<<at::cuda::detail::GET_BLOCKS(batch_size), at::cuda::detail::CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( batch_size, target.data_ptr<index_t>(), grad_output.packed_accessor64<scalar_t, 1>(), grad_input.packed_accessor64<scalar_t, 2>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return; } TORCH_CHECK(grad_output.numel() == 1); if (n_dims == 1) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_1d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_1d_index", [&] { nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t> <<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, target.data_ptr<index_t>(), total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_2d", [&] { AT_DISPATCH_NLL_LOSS_INDEX_TYPES( target.scalar_type(), "nll_loss_backward_reduce_cuda_kernel_2d_index", [&] { nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t> <<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), target.data_ptr<index_t>(), weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr, total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, input.size(0), input.size(1), n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); } } #undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES } // namespace TORCH_IMPL_FUNC(nll_loss_forward_out_cuda) (const Tensor& self, const Tensor& target, const OptionalTensorRef weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& output, const Tensor& total_weight) { const Tensor& weight = weight_opt.getTensorRef(); nll_loss_forward_out_cuda_template( output, total_weight, self, target, weight, reduction, ignore_index); } TORCH_IMPL_FUNC(nll_loss_backward_out_cuda) (const Tensor& grad_output, const Tensor& self, const Tensor& target, OptionalTensorRef weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight, const Tensor& grad_input) { const Tensor& weight = weight_opt.getTensorRef(); grad_input.zero_(); nll_loss_backward_out_cuda_template( grad_input, grad_output, self, target, total_weight, weight, reduction, ignore_index); } }} // namespace at::native
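The elementwise gradient used by binary_cross_entropy_backward_out_kernel above follows from the forward formula -(t*log(x) + (1-t)*log(1-x)): its derivative with respect to x is (x - t)/(x*(1-x)), with the denominator clamped by EPSILON to avoid division by zero at x = 0 or x = 1. The following is a minimal standalone sketch of that same formula for spot-checking outside ATen; the kernel name bce_backward_ref and the test values are made up for illustration and are not part of the PyTorch API.

#include <cstdio>
#include <cuda_runtime.h>

// Same elementwise formula as the gpu_kernel lambda above:
// dL/dx = grad * (x - t) / max((1 - x) * x, eps)
__global__ void bce_backward_ref(const float* grad, const float* input,
                                 const float* target, float* grad_input, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        const float eps = 1e-12f;
        float denom = fmaxf((1.0f - input[i]) * input[i], eps);
        grad_input[i] = grad[i] * (input[i] - target[i]) / denom;
    }
}

int main() {
    const int n = 4;
    float h_grad[n] = {1.f, 1.f, 1.f, 1.f};
    float h_in[n]   = {0.1f, 0.5f, 0.9f, 0.999f};
    float h_tg[n]   = {0.f, 1.f, 1.f, 0.f};
    float h_out[n];
    float *d_grad, *d_in, *d_tg, *d_out;
    cudaMalloc(&d_grad, n * sizeof(float));
    cudaMalloc(&d_in,   n * sizeof(float));
    cudaMalloc(&d_tg,   n * sizeof(float));
    cudaMalloc(&d_out,  n * sizeof(float));
    cudaMemcpy(d_grad, h_grad, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_in,   h_in,   n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_tg,   h_tg,   n * sizeof(float), cudaMemcpyHostToDevice);
    bce_backward_ref<<<1, 32>>>(d_grad, d_in, d_tg, d_out, n);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
        printf("x=%.3f t=%.0f -> dL/dx=%.4f\n", h_in[i], h_tg[i], h_out[i]);
    cudaFree(d_grad); cudaFree(d_in); cudaFree(d_tg); cudaFree(d_out);
    return 0;
}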
761a3a8887351ffc4832488c94a2910324464533.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> // debug for srand parameter #include "kernel.hip" #include "kernel_CPU.C" #define N 256 #define ITERS 10 void createRandomCells(int *cells, int n) { srand(time(NULL)); for (int i = 0; i < n*n*n; i++) if ((float)rand() / (float)RAND_MAX > 0.5) cells[i] = 1; else cells[i] = 0; } // debug void printResult(int *cells, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { for (int k = 0; k < n; k++) { printf("%d ", cells[i*n*n + j*n + k]); } printf("\n"); } printf("\n\n"); } } int main(int argc, char **argv){ int *cells = NULL; // cells computed by CPU int *cellsGPU = NULL; // CPU buffer for GPU results int *dCells = NULL; // cells computed by GPU // parse command line int device = 0; if (argc == 2) device = atoi(argv[1]); if (hipSetDevice(device) != hipSuccess){ fprintf(stderr, "Cannot set CUDA device!\n"); exit(1); } hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); printf("Using device %d: \"%s\"\n", device, deviceProp.name); printf("Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); // added printf("Maximum shared memory per block: %d\n", (int)deviceProp.sharedMemPerBlock); // create events for timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // allocate and set host memory cells = (int*)malloc(N*N*N*sizeof(cells[0])); cellsGPU = (int*)malloc(N*N*N*sizeof(cells[0])); createRandomCells(cells, N); // allocate and set device memory if (hipMalloc((void**)&dCells, N*N*N*sizeof(dCells[0])) != hipSuccess) { fprintf(stderr, "Device memory allocation error!\n"); goto cleanup; } hipMemcpy(dCells, cells, N*N*N*sizeof(dCells[0]), hipMemcpyHostToDevice); // solve on CPU printf("Solving on CPU...\n"); hipEventRecord(start, 0); solveCPU(&cells, N, ITERS); hipEventRecord(stop, 0); hipEventSynchronize(stop); float time; hipEventElapsedTime(&time, start, stop); printf("CPU performance: %f megacells/s\n", float(N*N*N)*float(ITERS)/time/1e3f); //printResult(cells, N); // debug // dummy copy, just to awake GPU hipMemcpy(cellsGPU, dCells, N*N*N*sizeof(dCells[0]), hipMemcpyDeviceToHost); // solve on GPU printf("Solving on GPU...\n"); hipEventRecord(start, 0); solveGPU(&dCells, N, ITERS); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("GPU performance: %f megacells/s\n", float(N*N*N)*float(ITERS)/time/1e3f); // check GPU results hipMemcpy(cellsGPU, dCells, N*N*N*sizeof(dCells[0]), hipMemcpyDeviceToHost); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) for (int k = 0; k < N; k++) if (cellsGPU[i*N*N + j*N + k] != cells[i*N*N + j*N + k]){ printf("Error detected at [%i, %i, %i]: %i should be %i.\n", i, j, k, cellsGPU[i*N*N + j*N + k], cells[i*N*N + j*N + k]); goto cleanup; // exit after the first error } printf("Test OK.\n"); cleanup: hipEventDestroy(start); hipEventDestroy(stop); if (dCells) hipFree(dCells); if (cells) free(cells); if (cellsGPU) free(cellsGPU); return 0; }
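The bundled kernel.hip and kernel_CPU.C are not included in this row, so the actual update rule is unknown. The sketch below only illustrates how a device kernel could traverse the 26-neighbourhood using the same flat indexing, cells[i*N*N + j*N + k], that the host verification loop uses; lifeStepSketch is hypothetical and the survival/birth thresholds are placeholders, not the benchmark's rule.

#include <cuda_runtime.h>

__global__ void lifeStepSketch(const int* in, int* out, int n) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x >= n || y >= n || z >= n) return;

    int alive = 0;
    for (int dx = -1; dx <= 1; dx++)
        for (int dy = -1; dy <= 1; dy++)
            for (int dz = -1; dz <= 1; dz++) {
                if (dx == 0 && dy == 0 && dz == 0) continue;
                int nx = x + dx, ny = y + dy, nz = z + dz;
                if (nx < 0 || ny < 0 || nz < 0 || nx >= n || ny >= n || nz >= n)
                    continue;                          // out-of-grid neighbours count as dead
                alive += in[nx*n*n + ny*n + nz];
            }

    int idx = x*n*n + y*n + z;
    // Placeholder thresholds -- the real rule lives in kernel.hip / kernel_CPU.C.
    out[idx] = (alive == 5 || (in[idx] == 1 && alive == 4)) ? 1 : 0;
}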
761a3a8887351ffc4832488c94a2910324464533.cu
#include <stdlib.h> #include <stdio.h> #include <cuda_runtime.h> #include <time.h> // debug for srand parameter #include "kernel.cu" #include "kernel_CPU.C" #define N 256 #define ITERS 10 void createRandomCells(int *cells, int n) { srand(time(NULL)); for (int i = 0; i < n*n*n; i++) if ((float)rand() / (float)RAND_MAX > 0.5) cells[i] = 1; else cells[i] = 0; } // debug void printResult(int *cells, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { for (int k = 0; k < n; k++) { printf("%d ", cells[i*n*n + j*n + k]); } printf("\n"); } printf("\n\n"); } } int main(int argc, char **argv){ int *cells = NULL; // cells computed by CPU int *cellsGPU = NULL; // CPU buffer for GPU results int *dCells = NULL; // cells computed by GPU // parse command line int device = 0; if (argc == 2) device = atoi(argv[1]); if (cudaSetDevice(device) != cudaSuccess){ fprintf(stderr, "Cannot set CUDA device!\n"); exit(1); } cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); printf("Using device %d: \"%s\"\n", device, deviceProp.name); printf("Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); // added printf("Maximum shared memory per block: %d\n", (int)deviceProp.sharedMemPerBlock); // create events for timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // allocate and set host memory cells = (int*)malloc(N*N*N*sizeof(cells[0])); cellsGPU = (int*)malloc(N*N*N*sizeof(cells[0])); createRandomCells(cells, N); // allocate and set device memory if (cudaMalloc((void**)&dCells, N*N*N*sizeof(dCells[0])) != cudaSuccess) { fprintf(stderr, "Device memory allocation error!\n"); goto cleanup; } cudaMemcpy(dCells, cells, N*N*N*sizeof(dCells[0]), cudaMemcpyHostToDevice); // solve on CPU printf("Solving on CPU...\n"); cudaEventRecord(start, 0); solveCPU(&cells, N, ITERS); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float time; cudaEventElapsedTime(&time, start, stop); printf("CPU performance: %f megacells/s\n", float(N*N*N)*float(ITERS)/time/1e3f); //printResult(cells, N); // debug // dummy copy, just to awake GPU cudaMemcpy(cellsGPU, dCells, N*N*N*sizeof(dCells[0]), cudaMemcpyDeviceToHost); // solve on GPU printf("Solving on GPU...\n"); cudaEventRecord(start, 0); solveGPU(&dCells, N, ITERS); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("GPU performance: %f megacells/s\n", float(N*N*N)*float(ITERS)/time/1e3f); // check GPU results cudaMemcpy(cellsGPU, dCells, N*N*N*sizeof(dCells[0]), cudaMemcpyDeviceToHost); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) for (int k = 0; k < N; k++) if (cellsGPU[i*N*N + j*N + k] != cells[i*N*N + j*N + k]){ printf("Error detected at [%i, %i, %i]: %i should be %i.\n", i, j, k, cellsGPU[i*N*N + j*N + k], cells[i*N*N + j*N + k]); goto cleanup; // exit after the first error } printf("Test OK.\n"); cleanup: cudaEventDestroy(start); cudaEventDestroy(stop); if (dCells) cudaFree(dCells); if (cells) free(cells); if (cellsGPU) free(cellsGPU); return 0; }
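kernel.cu is likewise not part of this row, but the call solveGPU(&dCells, N, ITERS), which passes the device pointer by address, suggests the solver swaps buffers internally and hands the final pointer back to main(). A minimal sketch of such a wrapper is shown below; lifeStepStub (a stand-in copy kernel where the real update would go) and the 8x8x8 block shape are assumptions, not the bundled implementation.

#include <cuda_runtime.h>

// Stand-in for the real cell-update kernel from kernel.cu (rule omitted here).
__global__ void lifeStepStub(const int* in, int* out, int n) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x < n && y < n && z < n)
        out[x*n*n + y*n + z] = in[x*n*n + y*n + z];   // real code would apply the rule
}

void solveGPU(int** dCells, int n, int iters) {
    int* in = *dCells;
    int* out = nullptr;
    cudaMalloc(&out, n * n * n * sizeof(int));        // second buffer for double buffering

    dim3 block(8, 8, 8);
    dim3 grid((n + block.x - 1) / block.x,
              (n + block.y - 1) / block.y,
              (n + block.z - 1) / block.z);

    for (int it = 0; it < iters; it++) {
        lifeStepStub<<<grid, block>>>(in, out, n);
        int* tmp = in; in = out; out = tmp;           // swap read/write buffers
    }

    *dCells = in;      // caller's pointer now refers to the newest generation
    cudaFree(out);     // the buffer left over as scratch is released here
}

Because the final generation may live in either buffer depending on the iteration count, writing it back through *dCells keeps main()'s later cudaMemcpy and cudaFree valid.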
b36510b3be42957cf7f2fcebdb57963f0590ba6a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <IL/il.h>

using namespace std;

__global__ void sobel(unsigned char *data, unsigned char *out, std::size_t rows, std::size_t cols){
    auto idx = blockIdx.x * blockDim.x + threadIdx.x;
    auto idy = blockIdx.y * blockDim.y + threadIdx.y;
    int h, v, res;
    if(idx > 0 && idx < (rows-1) && idy > 0 && idy < (cols-1)){
        for(int c = 0 ; c < 3 ; ++c) {
            // Horizontal
            h = data[((idx - 1) * cols + idy - 1) * 3 + c] - data[((idx - 1) * cols + idy + 1) * 3 + c]
              + 2 * data[( idx * cols + idy - 1) * 3 + c] - 2 * data[( idx * cols + idy + 1) * 3 + c]
              + data[((idx + 1) * cols + idy - 1) * 3 + c] - data[((idx + 1) * cols + idy + 1) * 3 + c];
            // Vertical
            v = data[((idx - 1) * cols + idy - 1) * 3 + c] - data[((idx + 1) * cols + idy - 1) * 3 + c]
              + 2 * data[((idx - 1) * cols + idy ) * 3 + c] - 2 * data[((idx + 1) * cols + idy ) * 3 + c]
              + data[((idx - 1) * cols + idy + 1) * 3 + c] - data[((idx + 1) * cols + idy + 1) * 3 + c];
            res = h*h + v*v;
            res = res > 255*255 ? 255*255 : res;
            out[(idx * cols + idy) * 3 + c] = sqrtf(res);
        }
    }
}

int main() {
    unsigned int image;
    ilInit();
    ilGenImages(1, &image);
    ilBindImage(image);
    ilLoadImage("4v9mo.jpg");

    auto cols = ilGetInteger(IL_IMAGE_WIDTH);
    auto rows = ilGetInteger(IL_IMAGE_HEIGHT);
    auto bpp = ilGetInteger(IL_IMAGE_BYTES_PER_PIXEL);

    // Retrieve the image data
    unsigned char* data = ilGetData();
    auto size_img = cols * rows * bpp;

    // Process the image
    unsigned char* out = (unsigned char*)malloc(size_img);
    unsigned char* out_d;
    unsigned char* data_d;

    hipMalloc(&out_d, size_img);
    hipMalloc(&data_d, size_img);
    hipMemcpy(data_d, data, size_img, hipMemcpyHostToDevice);

    //sobel<<<grid, block>>>(data_d,out_d,rows,cols);
    struct timeval start, stop;
    dim3 block(32, 32);
    dim3 grid( ( rows - 1) / block.x + 1 , ( cols - 1 ) / block.y + 1 );

    gettimeofday(&start, 0);
    hipLaunchKernelGGL(( sobel), dim3(grid), dim3(block), 0, 0, data_d, out_d, rows, cols);
    hipDeviceSynchronize();

    hipError_t err = hipGetLastError();
    if(err != hipSuccess){
        cout << "Error : " << hipGetErrorString(err) << endl;
        exit(EXIT_FAILURE);
    }
    gettimeofday(&stop, 0);
    cout << "elapsed time: " << (((stop.tv_sec*1000000+stop.tv_usec) - (start.tv_sec*1000000+start.tv_usec))/1000) << " ms " << endl;

    hipMemcpy(out, out_d, size_img, hipMemcpyDeviceToHost);
    ilSetData(out);

    // Save the image
    ilEnable(IL_FILE_OVERWRITE);
    ilSaveImage("out.jpg");
    ilDeleteImages(1, &image);

    free(out);
    hipFree(out_d);
    hipFree(data_d);

    return 0;
}
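A hypothetical host-side reference (not part of the original program) that applies the same per-channel 3x3 Sobel magnitude as the kernel above; comparing it against the downloaded out buffer, skipping the one-pixel border the kernel leaves untouched, gives a quick correctness check. The name sobelCPU is made up, and results match up to float/double rounding in the square root.

#include <cmath>
#include <cstddef>

void sobelCPU(const unsigned char* data, unsigned char* out,
              std::size_t rows, std::size_t cols) {
    for (std::size_t i = 1; i + 1 < rows; i++) {
        for (std::size_t j = 1; j + 1 < cols; j++) {
            for (int c = 0; c < 3; c++) {
                // Same horizontal and vertical responses as the device kernel
                int h = data[((i - 1) * cols + j - 1) * 3 + c] - data[((i - 1) * cols + j + 1) * 3 + c]
                      + 2 * data[(i * cols + j - 1) * 3 + c] - 2 * data[(i * cols + j + 1) * 3 + c]
                      + data[((i + 1) * cols + j - 1) * 3 + c] - data[((i + 1) * cols + j + 1) * 3 + c];
                int v = data[((i - 1) * cols + j - 1) * 3 + c] - data[((i + 1) * cols + j - 1) * 3 + c]
                      + 2 * data[((i - 1) * cols + j) * 3 + c] - 2 * data[((i + 1) * cols + j) * 3 + c]
                      + data[((i - 1) * cols + j + 1) * 3 + c] - data[((i + 1) * cols + j + 1) * 3 + c];
                int res = h * h + v * v;
                if (res > 255 * 255) res = 255 * 255;   // clamp before the square root
                out[(i * cols + j) * 3 + c] = (unsigned char)std::sqrt((double)res);
            }
        }
    }
}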
b36510b3be42957cf7f2fcebdb57963f0590ba6a.cu
#include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <IL/il.h>

using namespace std;

__global__ void sobel(unsigned char *data, unsigned char *out, std::size_t rows, std::size_t cols){
    auto idx = blockIdx.x * blockDim.x + threadIdx.x;
    auto idy = blockIdx.y * blockDim.y + threadIdx.y;
    int h, v, res;
    if(idx > 0 && idx < (rows-1) && idy > 0 && idy < (cols-1)){
        for(int c = 0 ; c < 3 ; ++c) {
            // Horizontal
            h = data[((idx - 1) * cols + idy - 1) * 3 + c] - data[((idx - 1) * cols + idy + 1) * 3 + c]
              + 2 * data[( idx * cols + idy - 1) * 3 + c] - 2 * data[( idx * cols + idy + 1) * 3 + c]
              + data[((idx + 1) * cols + idy - 1) * 3 + c] - data[((idx + 1) * cols + idy + 1) * 3 + c];
            // Vertical
            v = data[((idx - 1) * cols + idy - 1) * 3 + c] - data[((idx + 1) * cols + idy - 1) * 3 + c]
              + 2 * data[((idx - 1) * cols + idy ) * 3 + c] - 2 * data[((idx + 1) * cols + idy ) * 3 + c]
              + data[((idx - 1) * cols + idy + 1) * 3 + c] - data[((idx + 1) * cols + idy + 1) * 3 + c];
            res = h*h + v*v;
            res = res > 255*255 ? 255*255 : res;
            out[(idx * cols + idy) * 3 + c] = sqrtf(res);
        }
    }
}

int main() {
    unsigned int image;
    ilInit();
    ilGenImages(1, &image);
    ilBindImage(image);
    ilLoadImage("4v9mo.jpg");

    auto cols = ilGetInteger(IL_IMAGE_WIDTH);
    auto rows = ilGetInteger(IL_IMAGE_HEIGHT);
    auto bpp = ilGetInteger(IL_IMAGE_BYTES_PER_PIXEL);

    // Retrieve the image data
    unsigned char* data = ilGetData();
    auto size_img = cols * rows * bpp;

    // Process the image
    unsigned char* out = (unsigned char*)malloc(size_img);
    unsigned char* out_d;
    unsigned char* data_d;

    cudaMalloc(&out_d, size_img);
    cudaMalloc(&data_d, size_img);
    cudaMemcpy(data_d, data, size_img, cudaMemcpyHostToDevice);

    //sobel<<<grid, block>>>(data_d,out_d,rows,cols);
    struct timeval start, stop;
    dim3 block(32, 32);
    dim3 grid( ( rows - 1) / block.x + 1 , ( cols - 1 ) / block.y + 1 );

    gettimeofday(&start, 0);
    sobel<<<grid, block>>>(data_d, out_d, rows, cols);
    cudaDeviceSynchronize();

    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess){
        cout << "Error : " << cudaGetErrorString(err) << endl;
        exit(EXIT_FAILURE);
    }
    gettimeofday(&stop, 0);
    cout << "elapsed time: " << (((stop.tv_sec*1000000+stop.tv_usec) - (start.tv_sec*1000000+start.tv_usec))/1000) << " ms " << endl;

    cudaMemcpy(out, out_d, size_img, cudaMemcpyDeviceToHost);
    ilSetData(out);

    // Save the image
    ilEnable(IL_FILE_OVERWRITE);
    ilSaveImage("out.jpg");
    ilDeleteImages(1, &image);

    free(out);
    cudaFree(out_d);
    cudaFree(data_d);

    return 0;
}
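The program only inspects cudaGetLastError() after the kernel launch, so a failed cudaMalloc or cudaMemcpy would go unnoticed. Below is a small self-contained sketch of a wrapper macro that could guard every runtime call; CUDA_CHECK is a common idiom, not something defined in this file, and the calls in main() are placeholder examples.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                \
    do {                                                                \
        cudaError_t err__ = (call);                                     \
        if (err__ != cudaSuccess) {                                     \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                 \
                    cudaGetErrorString(err__), __FILE__, __LINE__);     \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

int main() {
    unsigned char* data_d = nullptr;
    CUDA_CHECK(cudaMalloc(&data_d, 1024));     // e.g. cudaMalloc(&data_d, size_img)
    CUDA_CHECK(cudaMemset(data_d, 0, 1024));
    CUDA_CHECK(cudaFree(data_d));
    return 0;
}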
7524cc985176c1c9159ea7a082977cb0e2ca708d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*----------------------------------------------------------------------------------* * Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, * * Sergio Losilla, Elias Toivanen, Jonas Juselius * * * * Permission is hereby granted, free of charge, to any person obtaining a copy * * of this software and associated documentation files (the "Software"), to deal * * in the Software without restriction, including without limitation the rights * * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * * copies of the Software, and to permit persons to whom the Software is * * furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in all* * copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * * SOFTWARE. * *----------------------------------------------------------------------------------*/ #include <stdlib.h> #include <stdio.h> #include "../bubbles/grid.h" #include "../bubbles/streamcontainer.h" #include "gbfmm_coulomb3d.h" #include "../bubbles/cube.h" #include "../bubbles/integrator.h" #include "../bubbles/spherical_harmonics_cuda.h" #include "gbfmm_potential_operator.h" #include "../bubbles/memory_leak_operators.h" #define X_ 0 #define Y_ 1 #define Z_ 2 #define BLOCK_SIZE 512 __host__ inline void check_coulomb_errors(const char *filename, const int line_number) { #ifdef DEBUG_CUDA hipDeviceSynchronize(); #endif hipError_t error = hipGetLastError(); if(error != hipSuccess) { printf("CUDA error at %s:%i: %s\n", filename, line_number, hipGetErrorString(error)); exit(-1); } } /* MISC device functions */ inline __device__ void calculate_distance_vector(double &dist_vec_x, double &dist_vec_y, double &dist_vec_z, const double reference_point_x, const double reference_point_y, const double reference_point_z, const double x, const double y, const double z){ // calculate the vector relative to reference_point dist_vec_x=x-reference_point_x; dist_vec_y=y-reference_point_y; dist_vec_z=z-reference_point_z; return; } __device__ inline void getXYZ_(int *x, int *y, int *z) { *x = blockIdx.x * blockDim.x + threadIdx.x; *y = blockIdx.y * blockDim.y + threadIdx.y; *z = blockIdx.z * blockDim.z + threadIdx.z; } /* * Returns the cube pointer offset caused by the x, y, z coordinates with given pitch and memory shape in y-direction */ __device__ inline int getCubeOffset3D_(const int x, const int y, const int z, const size_t pitch, int memory_y_shape) { return z * memory_y_shape * pitch / sizeof(double) + y * pitch / sizeof(double) + x; } /* Kernels and crucial device functions */ __device__ double GBFMMCoulomb3D_evaluate_le_point(const double x, const double y, const double z, const int lmax, const double* __restrict__ local_expansion) { int lm_address =0, address2 = 0; int l, m, l2; double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0; double r2 = x*x+y*y+z*z; // set value for l=0, m=0 double result = 1.0 * 
local_expansion[lm_address]; // set value for l=1, m=-1 lm_address += 1; result += y * local_expansion[lm_address]; // set all values where m=-1 m = -1; prev1 = y; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = 5; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=0 address2 += (2*l+2); } // set value for l=1, m=0 lm_address += 1; result += z * local_expansion[lm_address]; // set all values where m=0 prev1 = z; prev2 = 1.0; m = 0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = 6; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1; current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=0 address2 += (2*l+2); } // set value for l=1, m=1 lm_address += 1; result += x * local_expansion[lm_address]; // set all values where m=1 prev1 = x; m = 1; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = 7; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=0 address2 += (2*l+2); } // go through the rest of the stuff bottom = y; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1) top = x; // top refers to solid harmonics value with l=l-1 and m=l-1 lm_address += 1; for (l=2; l <= lmax; l++) { new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( y*top + x*bottom); result += new_bottom * local_expansion[lm_address]; // set all values where m=-l m = -l; prev1 = new_bottom; address2 = lm_address + (2*l+2); for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=l address2 += (2*l2+2); } // get value for l=l, m=l. 
The address is 2*l items away from l=l, m=-l lm_address += 2*l; top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( x*top-y*bottom ); // set all values where m=l m = l; prev1 = top; address2 = lm_address + (2*l+2); for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=l address2 += (2*l2+2); } // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top) bottom = new_bottom; result += top * local_expansion[lm_address]; // get next address lm_address += 1; } return result; } /* * Evaluate Local expansion on a grid */ __global__ void GBFMMCoulomb3D_evaluate_le_grid( double* __restrict__ cube, int lmax, const double* __restrict__ local_expansion, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count) { // The x index will be the fastest to change. int x, y, z; getXYZ_(&x, &y, &z); // get the offset from the input cube pointer const int id = getCubeOffset3D_(x, y, z, pitch, memory_y_shape); double value; double relative_position_x, relative_position_y, relative_position_z; //printf("X: %f, cell_spacing: %f, ncell: %d", distance, bubble->cell_spacing, ncell); // Check that the point is within the block if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { // calculate relative position to the zero-point and distance to it calculate_distance_vector(relative_position_x, relative_position_y, relative_position_z, zero_point_x, zero_point_y, zero_point_z, grid_points_x[x], grid_points_y[y], grid_points_z[z+slice_offset]); } // calculate the value for local expansion value multiplied with real solid harmonics in Racah's normalization value = GBFMMCoulomb3D_evaluate_le_point(relative_position_x, relative_position_y, relative_position_z, lmax, local_expansion); // if the point resides within the cube, add the value calculated above to the current value if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { cube[id] += value; } return; } /*************************************************** * GBFMMCoulomb3D implementation * * * ***************************************************/ GBFMMCoulomb3D::GBFMMCoulomb3D( // the grid from which the subgrids are extracted from (should represent the entire domain) needed // to evaluate coulomb potential for using gbfmm Grid3D *grid_in, // the grid from which the subgrids are extracted from (should represent the entire domain) needed // to evaluate coulomb potential for using gbfmm Grid3D *grid_out, // the maximum angular momentum quantum number 'l' value int lmax, // the box indices for which the evaluation of multipoles and eventually potential is performed by // this node int domain[2], // the first and last cell index in x-direction for each box in domain int *input_start_indices_x, int *input_end_indices_x, // the first and last cell index in y-direction for each box in domain int *input_start_indices_y, int *input_end_indices_y, // the first and last cell index in z-direction for each box in domain 
int *input_start_indices_z, int *input_end_indices_z, // the first and last cell index in x-direction for each box in domain int *output_start_indices_x, int *output_end_indices_x, // the first and last cell index in y-direction for each box in domain int *output_start_indices_y, int *output_end_indices_y, // the first and last cell index in z-direction for each box in domain int *output_start_indices_z, int *output_end_indices_z, // the main streamcontainer used to extract the boxwise streamcontainers from StreamContainer *streamContainer) { // init the common things using the function defined in gbfmm_potential_operator.cu initGBFMMPotentialOperator(grid_in, grid_out, lmax, domain, input_start_indices_x, input_end_indices_x, input_start_indices_y, input_end_indices_y, input_start_indices_z, input_end_indices_z, output_start_indices_x, output_end_indices_x, output_start_indices_y, output_end_indices_y, output_start_indices_z, output_end_indices_z, streamContainer); } void GBFMMCoulomb3D::initHarmonics() { //this->harmonics = new RealCubeHarmonics *[this->streamContainer->getNumberOfDevices()]; // initialize the solid-harmonics evaluators. // NOTE: this assumes that each of the boxes have the same shape and that the multipole center is at the center of the // box. If the cube-grid is changed to be non-equidistant at some point, this must be changed to be box-wise. //for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // initialize the solid harmonics // this->harmonics[device] = new RealRegularSolidCubeHarmonics(/*lmin=*/0, this->lmax, /*normalization=Racah's*/1, this->input_grids[0]->getShape(), this->device_containers[device]); //} //check_coulomb_errors(__FILE__, __LINE__); // evaluate the solid harmonics on all devices //for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // this->harmonics[device]->evaluate(this->device_grids[device], this->centers); //} //check_coulomb_errors(__FILE__, __LINE__); } void GBFMMCoulomb3D::destroyHarmonics() { // destroy the solid harmonics and bessels from all devices //for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // this->harmonics[device]->destroy(); // delete this->harmonics[device]; //} //delete[] this->harmonics; } void GBFMMCoulomb3D::initIntegrators() { // init the subgrids and the streamcontainers for each domain box for (int i = 0; i <= this->domain[1]-this->domain[0]; i++) { // initialize the Integrator needed for multipole evaluation with a buffer for (this->lmax+1)*(this->lmax+1) results this->integrators[i] = new GBFMMCoulomb3DMultipoleEvaluator(this->streamContainers[i], this->input_grids[i], this->lmax, &this->centers[i*3]); } } /* * Downloads multipole moments between l:0-this->lmax for each box belonging * to the domain of this node. * * NOTE: this functions IS BLOCKING with regards to CUDA */ void GBFMMCoulomb3D::downloadMultipoleMoments(double *host_multipole_moments) { // do the evaluation for the boxes host_multipole_moments = &host_multipole_moments[(domain[0]-1) * (this->lmax+1)*(this->lmax+1)]; for (int i = 0; i <= this->domain[1]-this->domain[0]; i++) { this->integrators[i]->downloadResult(host_multipole_moments); host_multipole_moments += (this->lmax+1)*(this->lmax+1); } } /* * Evaluates multipole moments between l:0-this->lmax for box 'i'. * To use this function, the box 'i' must belong to the domain of this node. 
* * NOTE: this function IS NOT BLOCKING with regards to CUDA */ void GBFMMCoulomb3D::calculateMultipoleMomentsBox(int i, CudaCube *cube) { this->integrators[i]->setIntegrationCube(cube); this->integrators[i]->integrate(); this->integrators[i]->setIntegrationCube(NULL); //int *cube_device_memory_shape = cube->getDeviceMemoryShape(); //int *integration_device_memory_shape = this->integrators[i]->getIntegrationCube()->getDeviceMemoryShape(); //int lm_cube_offset = 0; //check_coulomb_errors(__FILE__, __LINE__); /*for (int l = 0; l <= this->lmax ; l++) { for (int m = -l; m <= l; m++) { int * solid_harmonics_memory_shape = this->harmonics[0]->getShape(); size_t solid_harmonics_pitch = this->harmonics[0]->getDevicePitch(0); // loop over the devices of the box "i"'s StreamContainer for (int device = 0; device < this->streamContainers[i]->getNumberOfDevices(); device++) { this->streamContainers[i]->setDevice(device); // get the device number in the box "i"'s StreamContainer int device_number = this->streamContainers[i]->getDeviceNumber(device); // get the device order number in global StreamContainer int device_order_number = this->streamContainer->getDeviceOrderNumber(device_number); // get the device order number in cube's streamcontainer int device_cube_order_number = cube->getStreamContainer()->getDeviceOrderNumber(device_number); double *device_cube = cube->getDevicePointer(device_cube_order_number); size_t device_cube_pitch = cube->getDevicePitch(device_cube_order_number); // get the pointer to the temp cube used in integration // NOTE: the integrator has the same streamcontainer as the one looped above double *device_temp_cube = this->integrators[i]->getIntegrationCube()->getDevicePointer(device); size_t device_temp_pitch = this->integrators[i]->getIntegrationCube()->getDevicePitch(device); // get pointer to the first item of the solid harmonics for this device // NOTE: the zeros comes from the fact that there is only one device per streamcontainer of the // SolidHarmonics double *device_solid_harmonics = this->harmonics[device_order_number]->getDeviceResults(0); // get the number of slices handled by this device int device_slice_count = cube->getShape(Z_) / this->streamContainers[i]->getNumberOfDevices() + ((cube->getShape(Z_) % this->streamContainers[i]->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainers[i]->getStreamsPerDevice(); stream++) { hipStream_t *streamObject = this->streamContainers[i]->getStream(device, stream); // get the number of slices handled by this stream int slice_count = device_slice_count / this->streamContainers[i]->getStreamsPerDevice() + ((device_slice_count % this->streamContainers[i]->getStreamsPerDevice()) > stream); // get the launch configuration for the multiplication and integration dim3 block, grid; getCubeLaunchConfiguration(&grid, &block, cube->getShape(), slice_count, 256); // multiply the solid harmonics with the cube and store to device_temp_cube, i.e., "this->integrators[i]->getIntegrationCube()" multiply_3d_cubes(&device_solid_harmonics[lm_cube_offset], cube->getShape(X_), cube->getShape(Y_), solid_harmonics_memory_shape[Y_], solid_harmonics_pitch, device_cube, cube->getShape(X_), cube->getShape(Y_), cube_device_memory_shape[Y_], device_cube_pitch, device_temp_cube, this->integrators[i]->getIntegrationCube()->getShape(X_), this->integrators[i]->getIntegrationCube()->getShape(Y_), integration_device_memory_shape[Y_], device_temp_pitch, slice_count, &grid, &block, streamObject); check_coulomb_errors(__FILE__, 
__LINE__); // add to the pointers device_solid_harmonics += slice_count * solid_harmonics_memory_shape[Y_] * solid_harmonics_pitch / sizeof(double); device_cube += slice_count * cube_device_memory_shape[Y_] * device_cube_pitch / sizeof(double); device_temp_cube += slice_count * integration_device_memory_shape[Y_] * device_temp_pitch / sizeof(double); } } // add to the offset caused by the l, m-cubes lm_cube_offset += solid_harmonics_memory_shape[Y_] * solid_harmonics_memory_shape[Z_] * solid_harmonics_pitch / sizeof(double); // start the integration process at the GPUs // NOTE: this is not blocking // NOTE: the results are stored to the buffer of the integrator this->integrators[i]->integrate(); } }*/ } /* * Evaluates the potential within space of a single box. */ void GBFMMCoulomb3D::evaluatePotentialLEBox( // order number of the box within the domain of // this node (the indexing should start from 0) int i, // pointer to the HOST memory of the local expansion // for box double *local_expansion, double zero_point[3], Grid3D *grid3d, CudaCube *output_cube, StreamContainer *streamContainer) { double **device_cubes = output_cube->getDeviceCubes(); size_t *device_pitches = output_cube->getDevicePitches(); int shape_x = output_cube->getShape(X_); int shape_y = output_cube->getShape(Y_); int shape_z = output_cube->getShape(Z_); int *device_memory_shape = output_cube->getDeviceMemoryShape(); int slice_offset = 0; //check_coulomb_errors(__FILE__, __LINE__); // check if we are at the borders of the boxes, that are not the // borders of the global grid if (this->output_end_indices_x[i] != this->grid_out->axis[X_]->ncell) { shape_x -= 1; } if (this->output_end_indices_y[i] != this->grid_out->axis[Y_]->ncell) { shape_y -= 1; } if (this->output_end_indices_z[i] != this->grid_out->axis[Z_]->ncell) { shape_z -= 1; } for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) { // get global device order number int device_number = streamContainer->getDeviceNumber(device); int global_device_order_number = this->streamContainer->getDeviceOrderNumber(device_number); // set the correct device streamContainer->setDevice(device); // get the device cube pointer & pitch double *device_cube = device_cubes[device]; size_t device_pitch = device_pitches[device]; // set the device expansion pointer double *device_expansion = &this->device_expansions[global_device_order_number][i * (this->lmax+1)*(this->lmax+1)]; int device_slice_count = shape_z / streamContainer->getNumberOfDevices() + ((shape_z % streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < streamContainer->getStreamsPerDevice(); stream ++) { // upload the expansion hipMemcpyAsync(device_expansion, local_expansion, sizeof(double)*(this->lmax + 1)*(this->lmax +1), hipMemcpyHostToDevice, *streamContainer->getStream(device, stream)); //check_coulomb_errors(__FILE__, __LINE__); int slice_count = device_slice_count / streamContainer->getStreamsPerDevice() + ((device_slice_count % streamContainer->getStreamsPerDevice()) > stream); // get the launch configuration dim3 block, grid; output_cube->getLaunchConfiguration(&grid, &block, slice_count, BLOCK_SIZE); // evaluate the real regular solid harmonics multiplied with the local expansion values // for the slices belonging for this stream hipLaunchKernelGGL(( GBFMMCoulomb3D_evaluate_le_grid), dim3(grid), dim3(block), 0, *streamContainer->getStream(device, stream) , device_cube, this->lmax, device_expansion, grid3d->axis[X_]->device_gridpoints[device], 
grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], shape_x, shape_y, shape_z, zero_point[X_], zero_point[Y_], zero_point[Z_], slice_offset, device_pitch, device_memory_shape[Y_], slice_count); //check_coulomb_errors(__FILE__, __LINE__); // add the counter with the number of slices handled so far slice_offset += slice_count; device_cube = &device_cube[slice_count * device_memory_shape[Y_] * device_pitch / sizeof(double)]; } } } /*************************************************** * Fortran interfaces * * * ***************************************************/ extern "C" GBFMMCoulomb3D *gbfmmcoulomb3d_init_cuda( // the grid from which the subgrids are extracted from (should represent the entire domain) needed // to evaluate coulomb potential for using gbfmm Grid3D *grid_in, // the grid from which the subgrids are extracted from (should represent the entire domain) needed // to evaluate coulomb potential for using gbfmm Grid3D *grid_out, // the maximum angular momentum quantum number 'l' value int lmax, // the box indices for which the evaluation of multipoles and eventually potential is performed by // this node int domain[2], // the first and last cell index in x-direction for each box in domain int *input_start_indices_x, int *input_end_indices_x, // the first and last cell index in y-direction for each box in domain int *input_start_indices_y, int *input_end_indices_y, // the first and last cell index in z-direction for each box in domain int *input_start_indices_z, int *input_end_indices_z, // the first and last cell index in x-direction for each box in domain int *output_start_indices_x, int *output_end_indices_x, // the first and last cell index in y-direction for each box in domain int *output_start_indices_y, int *output_end_indices_y, // the first and last cell index in z-direction for each box in domain int *output_start_indices_z, int *output_end_indices_z, // the main streamcontainer used to extract the boxwise streamcontainers from StreamContainer *streamContainer) { GBFMMCoulomb3D *new_gbfmm_coulomb3d = new GBFMMCoulomb3D(grid_in, grid_out, lmax, domain, input_start_indices_x, input_end_indices_x, input_start_indices_y, input_end_indices_y, input_start_indices_z, input_end_indices_z, output_start_indices_x, output_end_indices_x, output_start_indices_y, output_end_indices_y, output_start_indices_z, output_end_indices_z, streamContainer); return new_gbfmm_coulomb3d; } extern "C" void gbfmmcoulomb3d_init_harmonics_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator GBFMMCoulomb3D *gbfmm_coulomb3d) { gbfmm_coulomb3d->initHarmonics(); } extern "C" void gbfmmcoulomb3d_destroy_harmonics_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator GBFMMCoulomb3D *gbfmm_coulomb3d) { gbfmm_coulomb3d->destroyHarmonics(); } extern "C" void gbfmmcoulomb3d_calculate_multipole_moments_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator GBFMMCoulomb3D *gbfmm_coulomb3d, // a pointer to the cube for which the multipole moments are evaluated // the boxes needed for multipole are uploaded for CudaCube *input_cube) { gbfmm_coulomb3d->calculateMultipoleMoments(input_cube); } extern "C" void gbfmmcoulomb3d_download_multipole_moments_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator, for which // the multipole moments are calculated GBFMMCoulomb3D *gbfmm_coulomb3d, // a pointer to the 2-dimensional array residing in host memory in which the multipole moments are stored double *host_multipole_moments) { 
gbfmm_coulomb3d->downloadMultipoleMoments(host_multipole_moments); } extern "C" void gbfmmcoulomb3d_upload_domain_boxes_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator, for which // the multipole moments are calculated GBFMMCoulomb3D *gbfmm_coulomb3d, // a pointer to the cube for which the multipole moments are evaluated CudaCube *input_cube) { gbfmm_coulomb3d->uploadDomainBoxes(input_cube); } extern "C" void gbfmmcoulomb3d_evaluate_potential_le_cuda( GBFMMCoulomb3D *gbfmm_coulomb3d, double *local_expansion, CudaCube *output_cube) { gbfmm_coulomb3d->evaluatePotentialLE(local_expansion, output_cube); } extern "C" StreamContainer* gbfmmcoulomb3d_get_box_stream_container_cuda( GBFMMCoulomb3D *gbfmm_coulomb3d, int ibox) { return gbfmm_coulomb3d->getBoxStreamContainer(ibox); } extern "C" void gbfmmcoulomb3d_destroy_cuda( // the destroyed gbfmm coulomb3d operator GBFMMCoulomb3D *gbfmm_coulomb3d) { gbfmm_coulomb3d->destroy(); }
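In GBFMMCoulomb3D_evaluate_le_point the local expansion coefficients are stored l-major with m running from -l to l, and the harmonics are real regular solid harmonics in Racah's normalization, so for lmax = 1 the kernel's explicit start-up code reduces to L[0] + L[1]*y + L[2]*z + L[3]*x evaluated at the grid point's position relative to the box's zero point. A hypothetical host-side cross-check of just that truncation follows; evaluate_le_lmax1 and the coefficient values are made up for illustration and are not part of the library.

#include <cstdio>

// Local expansion truncated at l = 1, coefficient order (0,0), (1,-1), (1,0), (1,1).
double evaluate_le_lmax1(double x, double y, double z, const double L[4]) {
    return L[0] + L[1] * y + L[2] * z + L[3] * x;
}

int main() {
    const double L[4] = {0.5, -0.1, 0.2, 0.3};   // made-up local expansion coefficients
    // (x, y, z) is the point relative to the box's expansion center (zero_point)
    printf("V(1,2,3) = %f\n", evaluate_le_lmax1(1.0, 2.0, 3.0, L));
    return 0;
}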
7524cc985176c1c9159ea7a082977cb0e2ca708d.cu
/*----------------------------------------------------------------------------------* * Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, * * Sergio Losilla, Elias Toivanen, Jonas Juselius * * * * Permission is hereby granted, free of charge, to any person obtaining a copy * * of this software and associated documentation files (the "Software"), to deal * * in the Software without restriction, including without limitation the rights * * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * * copies of the Software, and to permit persons to whom the Software is * * furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in all* * copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * * SOFTWARE. * *----------------------------------------------------------------------------------*/ #include <stdlib.h> #include <stdio.h> #include "../bubbles/grid.h" #include "../bubbles/streamcontainer.h" #include "gbfmm_coulomb3d.h" #include "../bubbles/cube.h" #include "../bubbles/integrator.h" #include "../bubbles/spherical_harmonics_cuda.h" #include "gbfmm_potential_operator.h" #include "../bubbles/memory_leak_operators.h" #define X_ 0 #define Y_ 1 #define Z_ 2 #define BLOCK_SIZE 512 __host__ inline void check_coulomb_errors(const char *filename, const int line_number) { #ifdef DEBUG_CUDA cudaThreadSynchronize(); #endif cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { printf("CUDA error at %s:%i: %s\n", filename, line_number, cudaGetErrorString(error)); exit(-1); } } /* MISC device functions */ inline __device__ void calculate_distance_vector(double &dist_vec_x, double &dist_vec_y, double &dist_vec_z, const double reference_point_x, const double reference_point_y, const double reference_point_z, const double x, const double y, const double z){ // calculate the vector relative to reference_point dist_vec_x=x-reference_point_x; dist_vec_y=y-reference_point_y; dist_vec_z=z-reference_point_z; return; } __device__ inline void getXYZ_(int *x, int *y, int *z) { *x = blockIdx.x * blockDim.x + threadIdx.x; *y = blockIdx.y * blockDim.y + threadIdx.y; *z = blockIdx.z * blockDim.z + threadIdx.z; } /* * Returns the cube pointer offset caused by the x, y, z coordinates with given pitch and memory shape in y-direction */ __device__ inline int getCubeOffset3D_(const int x, const int y, const int z, const size_t pitch, int memory_y_shape) { return z * memory_y_shape * pitch / sizeof(double) + y * pitch / sizeof(double) + x; } /* Kernels and crucial device functions */ __device__ double GBFMMCoulomb3D_evaluate_le_point(const double x, const double y, const double z, const int lmax, const double* __restrict__ local_expansion) { int lm_address =0, address2 = 0; int l, m, l2; double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0; double r2 = x*x+y*y+z*z; // set value for l=0, m=0 double result = 1.0 * local_expansion[lm_address]; // set value for l=1, m=-1 lm_address += 1; result += y * 
local_expansion[lm_address]; // set all values where m=-1 m = -1; prev1 = y; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = 5; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=0 address2 += (2*l+2); } // set value for l=1, m=0 lm_address += 1; result += z * local_expansion[lm_address]; // set all values where m=0 prev1 = z; prev2 = 1.0; m = 0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = 6; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1; current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=0 address2 += (2*l+2); } // set value for l=1, m=1 lm_address += 1; result += x * local_expansion[lm_address]; // set all values where m=1 prev1 = x; m = 1; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = 7; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=0 address2 += (2*l+2); } // go through the rest of the stuff bottom = y; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1) top = x; // top refers to solid harmonics value with l=l-1 and m=l-1 lm_address += 1; for (l=2; l <= lmax; l++) { new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( y*top + x*bottom); result += new_bottom * local_expansion[lm_address]; // set all values where m=-l m = -l; prev1 = new_bottom; address2 = lm_address + (2*l+2); for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=l address2 += (2*l2+2); } // get value for l=l, m=l. 
The address is 2*l items away from l=l, m=-l lm_address += 2*l; top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( x*top-y*bottom ); // set all values where m=l m = l; prev1 = top; address2 = lm_address + (2*l+2); for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result += current * local_expansion[address2]; // add the address2 to get to the next item with m=l address2 += (2*l2+2); } // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top) bottom = new_bottom; result += top * local_expansion[lm_address]; // get next address lm_address += 1; } return result; } /* * Evaluate Local expansion on a grid */ __global__ void GBFMMCoulomb3D_evaluate_le_grid( double* __restrict__ cube, int lmax, const double* __restrict__ local_expansion, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count) { // The x index will be the fastest to change. int x, y, z; getXYZ_(&x, &y, &z); // get the offset from the input cube pointer const int id = getCubeOffset3D_(x, y, z, pitch, memory_y_shape); double value; double relative_position_x, relative_position_y, relative_position_z; //printf("X: %f, cell_spacing: %f, ncell: %d", distance, bubble->cell_spacing, ncell); // Check that the point is within the block if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { // calculate relative position to the zero-point and distance to it calculate_distance_vector(relative_position_x, relative_position_y, relative_position_z, zero_point_x, zero_point_y, zero_point_z, grid_points_x[x], grid_points_y[y], grid_points_z[z+slice_offset]); } // calculate the value for local expansion value multiplied with real solid harmonics in Racah's normalization value = GBFMMCoulomb3D_evaluate_le_point(relative_position_x, relative_position_y, relative_position_z, lmax, local_expansion); // if the point resides within the cube, add the value calculated above to the current value if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { cube[id] += value; } return; } /*************************************************** * GBFMMCoulomb3D implementation * * * ***************************************************/ GBFMMCoulomb3D::GBFMMCoulomb3D( // the grid from which the subgrids are extracted from (should represent the entire domain) needed // to evaluate coulomb potential for using gbfmm Grid3D *grid_in, // the grid from which the subgrids are extracted from (should represent the entire domain) needed // to evaluate coulomb potential for using gbfmm Grid3D *grid_out, // the maximum angular momentum quantum number 'l' value int lmax, // the box indices for which the evaluation of multipoles and eventually potential is performed by // this node int domain[2], // the first and last cell index in x-direction for each box in domain int *input_start_indices_x, int *input_end_indices_x, // the first and last cell index in y-direction for each box in domain int *input_start_indices_y, int *input_end_indices_y, // the first and last cell index in z-direction for each box in domain 
int *input_start_indices_z, int *input_end_indices_z, // the first and last cell index in x-direction for each box in domain int *output_start_indices_x, int *output_end_indices_x, // the first and last cell index in y-direction for each box in domain int *output_start_indices_y, int *output_end_indices_y, // the first and last cell index in z-direction for each box in domain int *output_start_indices_z, int *output_end_indices_z, // the main streamcontainer used to extract the boxwise streamcontainers from StreamContainer *streamContainer) { // init the common things using the function defined in gbfmm_potential_operator.cu initGBFMMPotentialOperator(grid_in, grid_out, lmax, domain, input_start_indices_x, input_end_indices_x, input_start_indices_y, input_end_indices_y, input_start_indices_z, input_end_indices_z, output_start_indices_x, output_end_indices_x, output_start_indices_y, output_end_indices_y, output_start_indices_z, output_end_indices_z, streamContainer); } void GBFMMCoulomb3D::initHarmonics() { //this->harmonics = new RealCubeHarmonics *[this->streamContainer->getNumberOfDevices()]; // initialize the solid-harmonics evaluators. // NOTE: this assumes that each of the boxes have the same shape and that the multipole center is at the center of the // box. If the cube-grid is changed to be non-equidistant at some point, this must be changed to be box-wise. //for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // initialize the solid harmonics // this->harmonics[device] = new RealRegularSolidCubeHarmonics(/*lmin=*/0, this->lmax, /*normalization=Racah's*/1, this->input_grids[0]->getShape(), this->device_containers[device]); //} //check_coulomb_errors(__FILE__, __LINE__); // evaluate the solid harmonics on all devices //for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // this->harmonics[device]->evaluate(this->device_grids[device], this->centers); //} //check_coulomb_errors(__FILE__, __LINE__); } void GBFMMCoulomb3D::destroyHarmonics() { // destroy the solid harmonics and bessels from all devices //for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // this->harmonics[device]->destroy(); // delete this->harmonics[device]; //} //delete[] this->harmonics; } void GBFMMCoulomb3D::initIntegrators() { // init the subgrids and the streamcontainers for each domain box for (int i = 0; i <= this->domain[1]-this->domain[0]; i++) { // initialize the Integrator needed for multipole evaluation with a buffer for (this->lmax+1)*(this->lmax+1) results this->integrators[i] = new GBFMMCoulomb3DMultipoleEvaluator(this->streamContainers[i], this->input_grids[i], this->lmax, &this->centers[i*3]); } } /* * Downloads multipole moments between l:0-this->lmax for each box belonging * to the domain of this node. * * NOTE: this functions IS BLOCKING with regards to CUDA */ void GBFMMCoulomb3D::downloadMultipoleMoments(double *host_multipole_moments) { // do the evaluation for the boxes host_multipole_moments = &host_multipole_moments[(domain[0]-1) * (this->lmax+1)*(this->lmax+1)]; for (int i = 0; i <= this->domain[1]-this->domain[0]; i++) { this->integrators[i]->downloadResult(host_multipole_moments); host_multipole_moments += (this->lmax+1)*(this->lmax+1); } } /* * Evaluates multipole moments between l:0-this->lmax for box 'i'. * To use this function, the box 'i' must belong to the domain of this node. 
* * NOTE: this function IS NOT BLOCKING with regards to CUDA */ void GBFMMCoulomb3D::calculateMultipoleMomentsBox(int i, CudaCube *cube) { this->integrators[i]->setIntegrationCube(cube); this->integrators[i]->integrate(); this->integrators[i]->setIntegrationCube(NULL); //int *cube_device_memory_shape = cube->getDeviceMemoryShape(); //int *integration_device_memory_shape = this->integrators[i]->getIntegrationCube()->getDeviceMemoryShape(); //int lm_cube_offset = 0; //check_coulomb_errors(__FILE__, __LINE__); /*for (int l = 0; l <= this->lmax ; l++) { for (int m = -l; m <= l; m++) { int * solid_harmonics_memory_shape = this->harmonics[0]->getShape(); size_t solid_harmonics_pitch = this->harmonics[0]->getDevicePitch(0); // loop over the devices of the box "i"'s StreamContainer for (int device = 0; device < this->streamContainers[i]->getNumberOfDevices(); device++) { this->streamContainers[i]->setDevice(device); // get the device number in the box "i"'s StreamContainer int device_number = this->streamContainers[i]->getDeviceNumber(device); // get the device order number in global StreamContainer int device_order_number = this->streamContainer->getDeviceOrderNumber(device_number); // get the device order number in cube's streamcontainer int device_cube_order_number = cube->getStreamContainer()->getDeviceOrderNumber(device_number); double *device_cube = cube->getDevicePointer(device_cube_order_number); size_t device_cube_pitch = cube->getDevicePitch(device_cube_order_number); // get the pointer to the temp cube used in integration // NOTE: the integrator has the same streamcontainer as the one looped above double *device_temp_cube = this->integrators[i]->getIntegrationCube()->getDevicePointer(device); size_t device_temp_pitch = this->integrators[i]->getIntegrationCube()->getDevicePitch(device); // get pointer to the first item of the solid harmonics for this device // NOTE: the zeros comes from the fact that there is only one device per streamcontainer of the // SolidHarmonics double *device_solid_harmonics = this->harmonics[device_order_number]->getDeviceResults(0); // get the number of slices handled by this device int device_slice_count = cube->getShape(Z_) / this->streamContainers[i]->getNumberOfDevices() + ((cube->getShape(Z_) % this->streamContainers[i]->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainers[i]->getStreamsPerDevice(); stream++) { cudaStream_t *streamObject = this->streamContainers[i]->getStream(device, stream); // get the number of slices handled by this stream int slice_count = device_slice_count / this->streamContainers[i]->getStreamsPerDevice() + ((device_slice_count % this->streamContainers[i]->getStreamsPerDevice()) > stream); // get the launch configuration for the multiplication and integration dim3 block, grid; getCubeLaunchConfiguration(&grid, &block, cube->getShape(), slice_count, 256); // multiply the solid harmonics with the cube and store to device_temp_cube, i.e., "this->integrators[i]->getIntegrationCube()" multiply_3d_cubes(&device_solid_harmonics[lm_cube_offset], cube->getShape(X_), cube->getShape(Y_), solid_harmonics_memory_shape[Y_], solid_harmonics_pitch, device_cube, cube->getShape(X_), cube->getShape(Y_), cube_device_memory_shape[Y_], device_cube_pitch, device_temp_cube, this->integrators[i]->getIntegrationCube()->getShape(X_), this->integrators[i]->getIntegrationCube()->getShape(Y_), integration_device_memory_shape[Y_], device_temp_pitch, slice_count, &grid, &block, streamObject); check_coulomb_errors(__FILE__, 
__LINE__); // add to the pointers device_solid_harmonics += slice_count * solid_harmonics_memory_shape[Y_] * solid_harmonics_pitch / sizeof(double); device_cube += slice_count * cube_device_memory_shape[Y_] * device_cube_pitch / sizeof(double); device_temp_cube += slice_count * integration_device_memory_shape[Y_] * device_temp_pitch / sizeof(double); } } // add to the offset caused by the l, m-cubes lm_cube_offset += solid_harmonics_memory_shape[Y_] * solid_harmonics_memory_shape[Z_] * solid_harmonics_pitch / sizeof(double); // start the integration process at the GPUs // NOTE: this is not blocking // NOTE: the results are stored to the buffer of the integrator this->integrators[i]->integrate(); } }*/ } /* * Evaluates the potential within space of a single box. */ void GBFMMCoulomb3D::evaluatePotentialLEBox( // order number of the box within the domain of // this node (the indexing should start from 0) int i, // pointer to the HOST memory of the local expansion // for box double *local_expansion, double zero_point[3], Grid3D *grid3d, CudaCube *output_cube, StreamContainer *streamContainer) { double **device_cubes = output_cube->getDeviceCubes(); size_t *device_pitches = output_cube->getDevicePitches(); int shape_x = output_cube->getShape(X_); int shape_y = output_cube->getShape(Y_); int shape_z = output_cube->getShape(Z_); int *device_memory_shape = output_cube->getDeviceMemoryShape(); int slice_offset = 0; //check_coulomb_errors(__FILE__, __LINE__); // check if we are at the borders of the boxes, that are not the // borders of the global grid if (this->output_end_indices_x[i] != this->grid_out->axis[X_]->ncell) { shape_x -= 1; } if (this->output_end_indices_y[i] != this->grid_out->axis[Y_]->ncell) { shape_y -= 1; } if (this->output_end_indices_z[i] != this->grid_out->axis[Z_]->ncell) { shape_z -= 1; } for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) { // get global device order number int device_number = streamContainer->getDeviceNumber(device); int global_device_order_number = this->streamContainer->getDeviceOrderNumber(device_number); // set the correct device streamContainer->setDevice(device); // get the device cube pointer & pitch double *device_cube = device_cubes[device]; size_t device_pitch = device_pitches[device]; // set the device expansion pointer double *device_expansion = &this->device_expansions[global_device_order_number][i * (this->lmax+1)*(this->lmax+1)]; int device_slice_count = shape_z / streamContainer->getNumberOfDevices() + ((shape_z % streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < streamContainer->getStreamsPerDevice(); stream ++) { // upload the expansion cudaMemcpyAsync(device_expansion, local_expansion, sizeof(double)*(this->lmax + 1)*(this->lmax +1), cudaMemcpyHostToDevice, *streamContainer->getStream(device, stream)); //check_coulomb_errors(__FILE__, __LINE__); int slice_count = device_slice_count / streamContainer->getStreamsPerDevice() + ((device_slice_count % streamContainer->getStreamsPerDevice()) > stream); // get the launch configuration dim3 block, grid; output_cube->getLaunchConfiguration(&grid, &block, slice_count, BLOCK_SIZE); // evaluate the real regular solid harmonics multiplied with the local expansion values // for the slices belonging for this stream GBFMMCoulomb3D_evaluate_le_grid<<< grid, block, 0, *streamContainer->getStream(device, stream) >>> (device_cube, this->lmax, device_expansion, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], 
grid3d->axis[Z_]->device_gridpoints[device], shape_x, shape_y, shape_z, zero_point[X_], zero_point[Y_], zero_point[Z_], slice_offset, device_pitch, device_memory_shape[Y_], slice_count); //check_coulomb_errors(__FILE__, __LINE__); // add the counter with the number of slices handled so far slice_offset += slice_count; device_cube = &device_cube[slice_count * device_memory_shape[Y_] * device_pitch / sizeof(double)]; } } } /*************************************************** * Fortran interfaces * * * ***************************************************/ extern "C" GBFMMCoulomb3D *gbfmmcoulomb3d_init_cuda( // the grid from which the subgrids are extracted from (should represent the entire domain) needed // to evaluate coulomb potential for using gbfmm Grid3D *grid_in, // the grid from which the subgrids are extracted from (should represent the entire domain) needed // to evaluate coulomb potential for using gbfmm Grid3D *grid_out, // the maximum angular momentum quantum number 'l' value int lmax, // the box indices for which the evaluation of multipoles and eventually potential is performed by // this node int domain[2], // the first and last cell index in x-direction for each box in domain int *input_start_indices_x, int *input_end_indices_x, // the first and last cell index in y-direction for each box in domain int *input_start_indices_y, int *input_end_indices_y, // the first and last cell index in z-direction for each box in domain int *input_start_indices_z, int *input_end_indices_z, // the first and last cell index in x-direction for each box in domain int *output_start_indices_x, int *output_end_indices_x, // the first and last cell index in y-direction for each box in domain int *output_start_indices_y, int *output_end_indices_y, // the first and last cell index in z-direction for each box in domain int *output_start_indices_z, int *output_end_indices_z, // the main streamcontainer used to extract the boxwise streamcontainers from StreamContainer *streamContainer) { GBFMMCoulomb3D *new_gbfmm_coulomb3d = new GBFMMCoulomb3D(grid_in, grid_out, lmax, domain, input_start_indices_x, input_end_indices_x, input_start_indices_y, input_end_indices_y, input_start_indices_z, input_end_indices_z, output_start_indices_x, output_end_indices_x, output_start_indices_y, output_end_indices_y, output_start_indices_z, output_end_indices_z, streamContainer); return new_gbfmm_coulomb3d; } extern "C" void gbfmmcoulomb3d_init_harmonics_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator GBFMMCoulomb3D *gbfmm_coulomb3d) { gbfmm_coulomb3d->initHarmonics(); } extern "C" void gbfmmcoulomb3d_destroy_harmonics_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator GBFMMCoulomb3D *gbfmm_coulomb3d) { gbfmm_coulomb3d->destroyHarmonics(); } extern "C" void gbfmmcoulomb3d_calculate_multipole_moments_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator GBFMMCoulomb3D *gbfmm_coulomb3d, // a pointer to the cube for which the multipole moments are evaluated // the boxes needed for multipole are uploaded for CudaCube *input_cube) { gbfmm_coulomb3d->calculateMultipoleMoments(input_cube); } extern "C" void gbfmmcoulomb3d_download_multipole_moments_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator, for which // the multipole moments are calculated GBFMMCoulomb3D *gbfmm_coulomb3d, // a pointer to the 2-dimensional array residing in host memory in which the multipole moments are stored double *host_multipole_moments) { gbfmm_coulomb3d->downloadMultipoleMoments(host_multipole_moments); } 
extern "C" void gbfmmcoulomb3d_upload_domain_boxes_cuda( // a pointer to the pre-inited gbfmm coulomb3d operator, for which // the multipole moments are calculated GBFMMCoulomb3D *gbfmm_coulomb3d, // a pointer to the cube for which the multipole moments are evaluated CudaCube *input_cube) { gbfmm_coulomb3d->uploadDomainBoxes(input_cube); } extern "C" void gbfmmcoulomb3d_evaluate_potential_le_cuda( GBFMMCoulomb3D *gbfmm_coulomb3d, double *local_expansion, CudaCube *output_cube) { gbfmm_coulomb3d->evaluatePotentialLE(local_expansion, output_cube); } extern "C" StreamContainer* gbfmmcoulomb3d_get_box_stream_container_cuda( GBFMMCoulomb3D *gbfmm_coulomb3d, int ibox) { return gbfmm_coulomb3d->getBoxStreamContainer(ibox); } extern "C" void gbfmmcoulomb3d_destroy_cuda( // the destroyed gbfmm coulomb3d operator GBFMMCoulomb3D *gbfmm_coulomb3d) { gbfmm_coulomb3d->destroy(); }
26b6c5493083d3798082fa4d04f68eeb13c838ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The file has been adapted from the two files: https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cu https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cuh Git commit hash: 295a615aacce7e54a37e7935274ba15e901c78e4 We retain the following license from the original files: Copyright 2021, Jiaao He Licensed under the Apache License, Version 2.0 (the "License"). */ #include "paddle/fluid/operators/assign_pos_op.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(avoid_op_randomness); namespace paddle { namespace operators { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T> __global__ void AssignPos(T* cum_count, const T* numbers, T* out, int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { int number_idx = numbers[i]; if (number_idx > -1) { int p = platform::CudaAtomicAdd(cum_count + number_idx, -1); out[p - 1] = i; } } } template <typename T> class AssignPosCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { // assign pos decides which tokens should be fetched belong to specially // counter orderingly. auto cum_count = context.Input<LoDTensor>( "cum_count"); // (counter number) int32 | int64 auto numbers = context.Input<LoDTensor>("X"); // (batch_size * seq_len, topk) int32 auto eff_num_len = context.Input<LoDTensor>("eff_num_len"); // (sum(cum_count)) auto out = context.Output<LoDTensor>("Out"); // (cum_count) value ranges // from 0 to batch_size * // seq_len * topk auto place = context.GetPlace(); auto numel = numbers->numel(); T* cum_data = const_cast<T*>(cum_count->data<T>()); auto cum_size = cum_count->numel(); phi::DenseTensor cpu_eff_num_len; int64_t cpu_eff_num_len_data = 0; if (platform::is_cpu_place(eff_num_len->place())) { cpu_eff_num_len_data = eff_num_len->data<T>()[0]; } else { framework::TensorCopySync( *eff_num_len, platform::CPUPlace(), &cpu_eff_num_len); cpu_eff_num_len_data = cpu_eff_num_len.data<T>()[0]; } const auto& dev_ctx = context.template device_context<phi::GPUContext>(); framework::DDim out_dims = phi::make_ddim({cpu_eff_num_len_data}); auto out_data = out->mutable_data<T>(out_dims, place); const T* num_data = numbers->data<T>(); int blocks = NumBlocks(numel); int threads = kNumCUDAThreads; hipLaunchKernelGGL(( AssignPos<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), cum_data, num_data, out_data, numel); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(assign_pos, ops::AssignPosCUDAKernel<int64_t>);
26b6c5493083d3798082fa4d04f68eeb13c838ae.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The file has been adapted from the two files: https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cu https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cuh Git commit hash: 295a615aacce7e54a37e7935274ba15e901c78e4 We retain the following license from the original files: Copyright 2021, Jiaao He Licensed under the Apache License, Version 2.0 (the "License"). */ #include "paddle/fluid/operators/assign_pos_op.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(avoid_op_randomness); namespace paddle { namespace operators { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T> __global__ void AssignPos(T* cum_count, const T* numbers, T* out, int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { int number_idx = numbers[i]; if (number_idx > -1) { int p = platform::CudaAtomicAdd(cum_count + number_idx, -1); out[p - 1] = i; } } } template <typename T> class AssignPosCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { // assign pos decides which tokens should be fetched belong to specially // counter orderingly. auto cum_count = context.Input<LoDTensor>( "cum_count"); // (counter number) int32 | int64 auto numbers = context.Input<LoDTensor>("X"); // (batch_size * seq_len, topk) int32 auto eff_num_len = context.Input<LoDTensor>("eff_num_len"); // (sum(cum_count)) auto out = context.Output<LoDTensor>("Out"); // (cum_count) value ranges // from 0 to batch_size * // seq_len * topk auto place = context.GetPlace(); auto numel = numbers->numel(); T* cum_data = const_cast<T*>(cum_count->data<T>()); auto cum_size = cum_count->numel(); phi::DenseTensor cpu_eff_num_len; int64_t cpu_eff_num_len_data = 0; if (platform::is_cpu_place(eff_num_len->place())) { cpu_eff_num_len_data = eff_num_len->data<T>()[0]; } else { framework::TensorCopySync( *eff_num_len, platform::CPUPlace(), &cpu_eff_num_len); cpu_eff_num_len_data = cpu_eff_num_len.data<T>()[0]; } const auto& dev_ctx = context.template device_context<phi::GPUContext>(); framework::DDim out_dims = phi::make_ddim({cpu_eff_num_len_data}); auto out_data = out->mutable_data<T>(out_dims, place); const T* num_data = numbers->data<T>(); int blocks = NumBlocks(numel); int threads = kNumCUDAThreads; AssignPos<T><<<blocks, threads, 0, dev_ctx.stream()>>>( cum_data, num_data, out_data, numel); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(assign_pos, ops::AssignPosCUDAKernel<int64_t>);
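A single-threaded reference for the scatter that AssignPos performs on the GPU can be useful when validating the operator; the sketch below assumes cum_count[e] initially holds the inclusive prefix sum of tokens routed to expert e, exactly as the kernel does, and the function name is illustrative. Within one expert the GPU ordering is nondeterministic because of the atomics, so only the set of indices per expert should be compared.

#include <cstdint>
#include <vector>

// Host-side sketch of the AssignPos scatter (not part of the operator above).
// numbers[i]  : expert id for token i, or -1 for dropped tokens
// cum_count[e]: inclusive prefix sum of tokens per expert (modified in place)
// out[p]      : token indices grouped by expert
std::vector<int64_t> assign_pos_reference(const std::vector<int64_t>& numbers,
                                          std::vector<int64_t>& cum_count,
                                          int64_t out_len) {
    std::vector<int64_t> out(out_len, -1);
    for (int64_t i = 0; i < static_cast<int64_t>(numbers.size()); ++i) {
        const int64_t e = numbers[i];
        if (e > -1) {
            const int64_t p = cum_count[e]--;  // plays the role of atomicAdd(cum_count + e, -1)
            out[p - 1] = i;
        }
    }
    return out;
}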
f6253bf03bcb86779ecfb93bec26858f8c65672f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "core/common.cuh" #include <assert.h> template <typename T> __global__ void embedding_lookup_acc(const T *embedding_table,const int hidden_units,const int64_t *input_ids,T *embedding_lookup_tensor,const int ratio ,const int num_id) { int64_t ids = input_ids[blockIdx.x]; int hidden_idx = threadIdx.x + blockIdx.y * blockDim.x; int hidden_size = hidden_units; T val = __ldg(&embedding_table[ids * hidden_size + hidden_idx]); for(int i = 0;i< ratio;i++) { embedding_lookup_tensor[i * num_id * hidden_size + blockIdx.x * hidden_size + hidden_idx] += val; } } template <typename T> __global__ void embedding_lookup(const T *embedding_table,const int hidden_units, const int64_t *input_ids, T *embedding_lookup_tensor,bool no_scale_embedding) { int64_t ids = input_ids[blockIdx.x]; int hidden_idx = threadIdx.x + blockIdx.y * blockDim.x; int hidden_size = hidden_units; T scale = 1.0; if( !no_scale_embedding) { scale = (T)sqrtf(float(hidden_units)); } T val = __ldg(&embedding_table[ids * hidden_size + hidden_idx]) * scale; embedding_lookup_tensor[blockIdx.x * hidden_size + hidden_idx] = val ; } template <typename T> void embedding_lookup_kernel(const void* embedding_table, const int64_t* input_ids, void* embedding_lookup_tensor, const int num_id,const int hidden_units, hipStream_t stream,bool ifacc,const int ratio,bool no_scale_embedding) { const int m = num_id; int k = hidden_units; assert(m <= 65536); int fold_coeff = 1; if (k <= 1024){ fold_coeff = 1; }else if( k <= 2048){ fold_coeff = 2; }else if(k <= 4096){ fold_coeff = 4; }else if(k <= 8192){ fold_coeff = 8; }else if(k <= 16384){ fold_coeff = 16; } dim3 grid(m , fold_coeff); dim3 block(k / fold_coeff); // dim3 grid(num_id); // dim3 block(hidden_units); // assert(hidden_units <= 1024); if(ifacc) { hipLaunchKernelGGL(( embedding_lookup_acc), dim3(grid), dim3(block), 0, stream, (T*)embedding_table,hidden_units, input_ids, (T*)embedding_lookup_tensor,ratio,num_id); } else { hipLaunchKernelGGL(( embedding_lookup), dim3(grid), dim3(block), 0, stream, (T*)embedding_table, hidden_units,input_ids, (T*)embedding_lookup_tensor,no_scale_embedding); } } template void embedding_lookup_kernel<float>(const void* embedding_table, const int64_t* input_ids, void* embedding_lookup_tensor, const int num_id, const int hidden_units, hipStream_t stream,bool ifacc,const int ratio,bool no_scale_embedding); template void embedding_lookup_kernel<half>(const void* embedding_table, const int64_t* input_ids, void* embedding_lookup_tensor, const int num_id, const int hidden_units, hipStream_t stream,bool ifacc,const int ratio,bool no_scale_embedding); template<typename T> __global__ void position_encoding(T* output,const int64_t* positions,int seq_len, int step, int padding_idx,int n){ int tid = threadIdx.x + blockIdx.y * blockDim.x; int bid = blockIdx.x; float half_n = (float)n / 2.; int cuda_step = 0; // full decoder T encoding_val = 0; float log_result = __logf(10000) / (half_n - 1.f); float exp_result = __expf( (tid % (int)half_n) * -1 * log_result ); if (seq_len >1 && positions[bid] != 1) { cuda_step = positions[bid]; /*bid % seq_len + padding_idx + 1;*/ float scaled_time = exp_result * cuda_step; encoding_val = (tid < half_n) ? (T) __sinf(scaled_time) : (T) __cosf(scaled_time); } else if (seq_len == 1) { cuda_step = step + padding_idx + 1; float scaled_time = exp_result * cuda_step; encoding_val = (tid < half_n) ? 
(T) __sinf(scaled_time) : (T) __cosf(scaled_time); } output[bid * n + tid] = output[bid * n + tid] + encoding_val; } template<typename T> void position_encoding_kernel( void* output,const int64_t* positions, int seq_len,int step, int padding_idx, int m, int n, hipStream_t stream) { int fold_coeff = 1; int k = n; if (k <= 1024){ fold_coeff = 1; }else if( k <= 2048){ fold_coeff = 2; }else if(k <= 4096){ fold_coeff = 4; }else if(k <= 8192){ fold_coeff = 8; }else if(k <= 16384){ fold_coeff = 16; } dim3 grid(m , fold_coeff); dim3 block(k / fold_coeff); // printf("step:%d\n",step); hipLaunchKernelGGL(( position_encoding<T>), dim3(grid), dim3(block), 0, stream, (T*)output,positions, seq_len,step,padding_idx, n); } template void position_encoding_kernel<float>(void* output,const int64_t* positions, int seq_len,int step, int padding_idx,int m, int n, hipStream_t stream); template void position_encoding_kernel<half>(void* output,const int64_t* positions, int seq_len,int step, int padding_idx,int m, int n, hipStream_t stream);
f6253bf03bcb86779ecfb93bec26858f8c65672f.cu
#include <cuda_runtime.h> #include <cuda_fp16.h> #include "core/common.cuh" #include <assert.h> template <typename T> __global__ void embedding_lookup_acc(const T *embedding_table,const int hidden_units,const int64_t *input_ids,T *embedding_lookup_tensor,const int ratio ,const int num_id) { int64_t ids = input_ids[blockIdx.x]; int hidden_idx = threadIdx.x + blockIdx.y * blockDim.x; int hidden_size = hidden_units; T val = __ldg(&embedding_table[ids * hidden_size + hidden_idx]); for(int i = 0;i< ratio;i++) { embedding_lookup_tensor[i * num_id * hidden_size + blockIdx.x * hidden_size + hidden_idx] += val; } } template <typename T> __global__ void embedding_lookup(const T *embedding_table,const int hidden_units, const int64_t *input_ids, T *embedding_lookup_tensor,bool no_scale_embedding) { int64_t ids = input_ids[blockIdx.x]; int hidden_idx = threadIdx.x + blockIdx.y * blockDim.x; int hidden_size = hidden_units; T scale = 1.0; if( !no_scale_embedding) { scale = (T)sqrtf(float(hidden_units)); } T val = __ldg(&embedding_table[ids * hidden_size + hidden_idx]) * scale; embedding_lookup_tensor[blockIdx.x * hidden_size + hidden_idx] = val ; } template <typename T> void embedding_lookup_kernel(const void* embedding_table, const int64_t* input_ids, void* embedding_lookup_tensor, const int num_id,const int hidden_units, cudaStream_t stream,bool ifacc,const int ratio,bool no_scale_embedding) { const int m = num_id; int k = hidden_units; assert(m <= 65536); int fold_coeff = 1; if (k <= 1024){ fold_coeff = 1; }else if( k <= 2048){ fold_coeff = 2; }else if(k <= 4096){ fold_coeff = 4; }else if(k <= 8192){ fold_coeff = 8; }else if(k <= 16384){ fold_coeff = 16; } dim3 grid(m , fold_coeff); dim3 block(k / fold_coeff); // dim3 grid(num_id); // dim3 block(hidden_units); // assert(hidden_units <= 1024); if(ifacc) { embedding_lookup_acc<<<grid, block, 0, stream>>>((T*)embedding_table,hidden_units, input_ids, (T*)embedding_lookup_tensor,ratio,num_id); } else { embedding_lookup<<<grid, block, 0, stream>>>((T*)embedding_table, hidden_units,input_ids, (T*)embedding_lookup_tensor,no_scale_embedding); } } template void embedding_lookup_kernel<float>(const void* embedding_table, const int64_t* input_ids, void* embedding_lookup_tensor, const int num_id, const int hidden_units, cudaStream_t stream,bool ifacc,const int ratio,bool no_scale_embedding); template void embedding_lookup_kernel<half>(const void* embedding_table, const int64_t* input_ids, void* embedding_lookup_tensor, const int num_id, const int hidden_units, cudaStream_t stream,bool ifacc,const int ratio,bool no_scale_embedding); template<typename T> __global__ void position_encoding(T* output,const int64_t* positions,int seq_len, int step, int padding_idx,int n){ int tid = threadIdx.x + blockIdx.y * blockDim.x; int bid = blockIdx.x; float half_n = (float)n / 2.; int cuda_step = 0; // full decoder T encoding_val = 0; float log_result = __logf(10000) / (half_n - 1.f); float exp_result = __expf( (tid % (int)half_n) * -1 * log_result ); if (seq_len >1 && positions[bid] != 1) { cuda_step = positions[bid]; /*bid % seq_len + padding_idx + 1;*/ float scaled_time = exp_result * cuda_step; encoding_val = (tid < half_n) ? (T) __sinf(scaled_time) : (T) __cosf(scaled_time); } else if (seq_len == 1) { cuda_step = step + padding_idx + 1; float scaled_time = exp_result * cuda_step; encoding_val = (tid < half_n) ? 
(T) __sinf(scaled_time) : (T) __cosf(scaled_time); } output[bid * n + tid] = output[bid * n + tid] + encoding_val; } template<typename T> void position_encoding_kernel( void* output,const int64_t* positions, int seq_len,int step, int padding_idx, int m, int n, cudaStream_t stream) { int fold_coeff = 1; int k = n; if (k <= 1024){ fold_coeff = 1; }else if( k <= 2048){ fold_coeff = 2; }else if(k <= 4096){ fold_coeff = 4; }else if(k <= 8192){ fold_coeff = 8; }else if(k <= 16384){ fold_coeff = 16; } dim3 grid(m , fold_coeff); dim3 block(k / fold_coeff); // printf("step:%d\n",step); position_encoding<T><<<grid, block, 0, stream>>>((T*)output,positions, seq_len,step,padding_idx, n); } template void position_encoding_kernel<float>(void* output,const int64_t* positions, int seq_len,int step, int padding_idx,int m, int n, cudaStream_t stream); template void position_encoding_kernel<half>(void* output,const int64_t* positions, int seq_len,int step, int padding_idx,int m, int n, cudaStream_t stream);
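The value added per element by position_encoding is the usual sinusoidal encoding; a host-side sketch of the same arithmetic for the seq_len == 1 (incremental decoding) branch is below, with n the hidden size and tid the hidden index, as in the kernel. The function name is illustrative.

#include <cmath>
#include <cstdio>

// Host sketch of the encoding value computed per (tid, step) in position_encoding.
static float position_encoding_value(int tid, int step, int padding_idx, int n) {
    const float half_n = (float)n / 2.f;
    const int pos = step + padding_idx + 1;                        // seq_len == 1 branch
    const float log_result = logf(10000.f) / (half_n - 1.f);
    const float exp_result = expf((tid % (int)half_n) * -1.f * log_result);
    const float scaled_time = exp_result * pos;
    return (tid < half_n) ? sinf(scaled_time) : cosf(scaled_time);
}

int main() {
    // First few channels of the encoding for step 0, hidden size 8.
    for (int tid = 0; tid < 8; ++tid)
        printf("tid=%d -> %f\n", tid,
               position_encoding_value(tid, /*step=*/0, /*padding_idx=*/1, /*n=*/8));
    return 0;
}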
abc6b4dcac5e084361e04e0818bb220da4db3db2.hip
// !!! This is a file automatically generated by hipify!!! /* * Convert RGB image to grayscale image * I = .299f * R + .587f * G + .114f * B * * compile: * nvcc -o rgb2gray rgb2gray.cu -I ../inc `pkg-config --cflags opencv` `pkg-config --libs opencv` */ #include <string> #include <cstdio> #include <time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <opencv2/opencv.hpp> cv::Mat imageRGBA; cv::Mat imageGrey; uchar4 *d_rgbaImage__; unsigned char *d_greyImage__; struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; double get_cpu_time(){ return (double)clock() / CLOCKS_PER_SEC; } size_t numRows(); size_t numCols(); void preProcess(uchar4 **h_rgbaImage, unsigned char **h_greyImage, uchar4 **d_rgbaImage, unsigned char **d_greyImage, const std::string& filename); void postProcess(const std::string& output_file); __global__ void rgb2grayGPU_kernel(const uchar4* const rgbaImage, unsigned char* greyImage, size_t numRows, size_t numCols); void rgb2grayCPU(const uchar4* const rgbaImage, unsigned char* const greyImage, size_t numRows, size_t numCols); void rgb2grayGPU(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols); /* * Main routine */ int main(int argc, char ** argv) { uchar4 *h_rgbaImage, *d_rgbaImage; unsigned char *h_greyImage, *d_greyImage; std::string input_file; std::string output_file; std::string processor; if (argc == 4) { processor = std::string(argv[1]); input_file = std::string(argv[2]); output_file = std::string(argv[3]); } else { std::cerr << "Usage: ./rgb2gray -cup/gpu input_file output_file" << std::endl; exit(1); } if (processor == "-gpu") { preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file); GpuTimer timer; timer.Start(); rgb2grayGPU(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols()); timer.Stop(); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\n"); int err = printf("GPU time: %f msecs.\n", timer.Elapsed()); if (err < 0) { //Couldn't print! Probably the student closed stdout - bad news std::cerr << "Couldn't print timing information! STDOUT Closed!" 
<< std::endl; exit(1); } //check results and output the grey image postProcess(output_file); } else if (processor == "-cpu") { cv::Mat image; image = cv::imread(input_file.c_str(), CV_LOAD_IMAGE_COLOR); if (image.empty()) { std::cerr << "Couldn't open file: " << input_file << std::endl; exit(1); } cv::cvtColor(image, imageRGBA, CV_BGR2RGBA); imageGrey.create(image.rows, image.cols, CV_8UC1); double cpu0 = get_cpu_time(); rgb2grayCPU((uchar4 *)imageRGBA.ptr<unsigned char>(0), imageGrey.ptr<unsigned char>(0), image.rows, image.cols); double t = get_cpu_time() - cpu0; cv::imwrite(output_file.c_str(), imageGrey); printf("CPU time: %f msecs.\n", t * 1000); } return 0; } /* * Functions */ size_t numRows() { return imageRGBA.rows; } size_t numCols() { return imageRGBA.cols; } void preProcess(uchar4 **inputImage, unsigned char **greyImage, uchar4 **d_rgbaImage, unsigned char **d_greyImage, const std::string& filename) { //make sure the context initializes ok checkCudaErrors(hipFree(0)); cv::Mat image; image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR); if (image.empty()) { std::cerr << "Couldn't open file: " << filename << std::endl; exit(1); } cv::cvtColor(image, imageRGBA, CV_BGR2RGBA); //allocate memory for the output imageGrey.create(image.rows, image.cols, CV_8UC1); //This shouldn't ever happen given the way the images are created //at least based upon my limited understanding of OpenCV, but better to check if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) { std::cerr << "Images aren't continuous!! Exiting." << std::endl; exit(1); } *inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0); *greyImage = imageGrey.ptr<unsigned char>(0); const size_t numPixels = numRows() * numCols(); //allocate memory on the device for both input and output checkCudaErrors(hipMalloc(d_rgbaImage, sizeof(uchar4) * numPixels)); checkCudaErrors(hipMalloc(d_greyImage, sizeof(unsigned char) * numPixels)); //make sure no memory is left laying around checkCudaErrors(hipMemset(*d_greyImage, 0, numPixels * sizeof(unsigned char))); //copy input array to GPU checkCudaErrors(hipMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice)); //save as global variables d_rgbaImage__ = *d_rgbaImage; d_greyImage__ = *d_greyImage; } void postProcess(const std::string& output_file) { const int numPixels = numRows() * numCols(); //copy the result back to CPU checkCudaErrors(hipMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage__, sizeof(unsigned char) * numPixels, hipMemcpyDeviceToHost)); //output the image cv::imwrite(output_file.c_str(), imageGrey); //cleanup hipFree(d_rgbaImage__); hipFree(d_greyImage__); } void rgb2grayCPU(const uchar4* const rgbaImage, unsigned char* const greyImage, size_t numRows, size_t numCols) { for (size_t r = 0; r < numRows; r++) { for (size_t c = 0; c < numCols; c++) { uchar4 rgba = rgbaImage[r * numCols + c]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[r * numCols + c] = channelSum; } } } __global__ void rgb2grayGPU_kernel(const uchar4* const rgbaImage, unsigned char* greyImage, size_t numRows, size_t numCols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int idx = y * numRows + x; uchar4 rgba = rgbaImage[idx]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[idx] = channelSum; } void rgb2grayGPU(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { const dim3 blockSize(32, 
32, 1); const dim3 gridSize((numRows + 31) / 32, (numCols + 31) / 32, 1); hipLaunchKernelGGL(( rgb2grayGPU_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); }
abc6b4dcac5e084361e04e0818bb220da4db3db2.cu
/* * Convert RGB image to grayscale image * I = .299f * R + .587f * G + .114f * B * * compile: * nvcc -o rgb2gray rgb2gray.cu -I ../inc `pkg-config --cflags opencv` `pkg-config --libs opencv` */ #include <string> #include <cstdio> #include <time.h> #include <cuda.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <opencv2/opencv.hpp> cv::Mat imageRGBA; cv::Mat imageGrey; uchar4 *d_rgbaImage__; unsigned char *d_greyImage__; struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); } void Stop() { cudaEventRecord(stop, 0); } float Elapsed() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; double get_cpu_time(){ return (double)clock() / CLOCKS_PER_SEC; } size_t numRows(); size_t numCols(); void preProcess(uchar4 **h_rgbaImage, unsigned char **h_greyImage, uchar4 **d_rgbaImage, unsigned char **d_greyImage, const std::string& filename); void postProcess(const std::string& output_file); __global__ void rgb2grayGPU_kernel(const uchar4* const rgbaImage, unsigned char* greyImage, size_t numRows, size_t numCols); void rgb2grayCPU(const uchar4* const rgbaImage, unsigned char* const greyImage, size_t numRows, size_t numCols); void rgb2grayGPU(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols); /* * Main routine */ int main(int argc, char ** argv) { uchar4 *h_rgbaImage, *d_rgbaImage; unsigned char *h_greyImage, *d_greyImage; std::string input_file; std::string output_file; std::string processor; if (argc == 4) { processor = std::string(argv[1]); input_file = std::string(argv[2]); output_file = std::string(argv[3]); } else { std::cerr << "Usage: ./rgb2gray -cup/gpu input_file output_file" << std::endl; exit(1); } if (processor == "-gpu") { preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file); GpuTimer timer; timer.Start(); rgb2grayGPU(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols()); timer.Stop(); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\n"); int err = printf("GPU time: %f msecs.\n", timer.Elapsed()); if (err < 0) { //Couldn't print! Probably the student closed stdout - bad news std::cerr << "Couldn't print timing information! STDOUT Closed!" 
<< std::endl; exit(1); } //check results and output the grey image postProcess(output_file); } else if (processor == "-cpu") { cv::Mat image; image = cv::imread(input_file.c_str(), CV_LOAD_IMAGE_COLOR); if (image.empty()) { std::cerr << "Couldn't open file: " << input_file << std::endl; exit(1); } cv::cvtColor(image, imageRGBA, CV_BGR2RGBA); imageGrey.create(image.rows, image.cols, CV_8UC1); double cpu0 = get_cpu_time(); rgb2grayCPU((uchar4 *)imageRGBA.ptr<unsigned char>(0), imageGrey.ptr<unsigned char>(0), image.rows, image.cols); double t = get_cpu_time() - cpu0; cv::imwrite(output_file.c_str(), imageGrey); printf("CPU time: %f msecs.\n", t * 1000); } return 0; } /* * Functions */ size_t numRows() { return imageRGBA.rows; } size_t numCols() { return imageRGBA.cols; } void preProcess(uchar4 **inputImage, unsigned char **greyImage, uchar4 **d_rgbaImage, unsigned char **d_greyImage, const std::string& filename) { //make sure the context initializes ok checkCudaErrors(cudaFree(0)); cv::Mat image; image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR); if (image.empty()) { std::cerr << "Couldn't open file: " << filename << std::endl; exit(1); } cv::cvtColor(image, imageRGBA, CV_BGR2RGBA); //allocate memory for the output imageGrey.create(image.rows, image.cols, CV_8UC1); //This shouldn't ever happen given the way the images are created //at least based upon my limited understanding of OpenCV, but better to check if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) { std::cerr << "Images aren't continuous!! Exiting." << std::endl; exit(1); } *inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0); *greyImage = imageGrey.ptr<unsigned char>(0); const size_t numPixels = numRows() * numCols(); //allocate memory on the device for both input and output checkCudaErrors(cudaMalloc(d_rgbaImage, sizeof(uchar4) * numPixels)); checkCudaErrors(cudaMalloc(d_greyImage, sizeof(unsigned char) * numPixels)); //make sure no memory is left laying around checkCudaErrors(cudaMemset(*d_greyImage, 0, numPixels * sizeof(unsigned char))); //copy input array to GPU checkCudaErrors(cudaMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice)); //save as global variables d_rgbaImage__ = *d_rgbaImage; d_greyImage__ = *d_greyImage; } void postProcess(const std::string& output_file) { const int numPixels = numRows() * numCols(); //copy the result back to CPU checkCudaErrors(cudaMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage__, sizeof(unsigned char) * numPixels, cudaMemcpyDeviceToHost)); //output the image cv::imwrite(output_file.c_str(), imageGrey); //cleanup cudaFree(d_rgbaImage__); cudaFree(d_greyImage__); } void rgb2grayCPU(const uchar4* const rgbaImage, unsigned char* const greyImage, size_t numRows, size_t numCols) { for (size_t r = 0; r < numRows; r++) { for (size_t c = 0; c < numCols; c++) { uchar4 rgba = rgbaImage[r * numCols + c]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[r * numCols + c] = channelSum; } } } __global__ void rgb2grayGPU_kernel(const uchar4* const rgbaImage, unsigned char* greyImage, size_t numRows, size_t numCols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int idx = y * numRows + x; uchar4 rgba = rgbaImage[idx]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[idx] = channelSum; } void rgb2grayGPU(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { const dim3 
blockSize(32, 32, 1); const dim3 gridSize((numRows + 31) / 32, (numCols + 31) / 32, 1); rgb2grayGPU_kernel<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); }
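The conversion itself is the standard luma weighting I = .299 R + .587 G + .114 B. Below is a bounds-checked sketch of such a kernel for reference; it is a defensive rewrite assuming x indexes columns and y indexes rows, not the kernel used in the files above.

// Sketch only: same luma formula as above, with an explicit guard so that
// partially filled blocks at the image border cannot write out of range.
__global__ void rgb2gray_guarded(const uchar4* __restrict__ rgba,
                                 unsigned char* __restrict__ grey,
                                 size_t numRows, size_t numCols) {
    const size_t c = blockIdx.x * blockDim.x + threadIdx.x;  // column
    const size_t r = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (r >= numRows || c >= numCols) return;
    const uchar4 p = rgba[r * numCols + c];
    grey[r * numCols + c] =
        (unsigned char)(.299f * p.x + .587f * p.y + .114f * p.z);
}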
849120a3ada16e7c8d27e68bebe7500d0af0e8a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_minus_4_front; int xdim0_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_minus_4_front; int ydim0_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_minus_4_front; int xdim1_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_minus_4_front; int ydim1_update_halo_kernel2_zvel_minus_4_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_minus_4_front * (y) + \ xdim0_update_halo_kernel2_zvel_minus_4_front * \ ydim0_update_halo_kernel2_zvel_minus_4_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_minus_4_front * (y) + \ xdim1_update_halo_kernel2_zvel_minus_4_front * \ ydim1_update_halo_kernel2_zvel_minus_4_front * (z)) // user function __device__ inline void update_halo_kernel2_zvel_minus_4_front(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = -zvel0[OPS_ACC0(0, 0, -4)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = -zvel1[OPS_ACC1(0, 0, -4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_minus_4_front( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_4_front + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_4_front * ydim0_update_halo_kernel2_zvel_minus_4_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_4_front + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_4_front * ydim1_update_halo_kernel2_zvel_minus_4_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_minus_4_front(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_minus_4_front( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 102)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(102, "update_halo_kernel2_zvel_minus_4_front"); OPS_kernels[102].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int 
y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_4_front_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_4_front_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_4_front_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_4_front_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_minus_4_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_minus_4_front_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_minus_4_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_minus_4_front_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_minus_4_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_minus_4_front_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_minus_4_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_minus_4_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_minus_4_front), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[102].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // 
Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
849120a3ada16e7c8d27e68bebe7500d0af0e8a6.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_minus_4_front; int xdim0_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_minus_4_front; int ydim0_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_minus_4_front; int xdim1_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_minus_4_front; int ydim1_update_halo_kernel2_zvel_minus_4_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_minus_4_front * (y) + \ xdim0_update_halo_kernel2_zvel_minus_4_front * \ ydim0_update_halo_kernel2_zvel_minus_4_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_minus_4_front * (y) + \ xdim1_update_halo_kernel2_zvel_minus_4_front * \ ydim1_update_halo_kernel2_zvel_minus_4_front * (z)) // user function __device__ inline void update_halo_kernel2_zvel_minus_4_front(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = -zvel0[OPS_ACC0(0, 0, -4)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = -zvel1[OPS_ACC1(0, 0, -4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_minus_4_front( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_4_front + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_4_front * ydim0_update_halo_kernel2_zvel_minus_4_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_4_front + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_4_front * ydim1_update_halo_kernel2_zvel_minus_4_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_minus_4_front(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_minus_4_front( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 102)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(102, "update_halo_kernel2_zvel_minus_4_front"); OPS_kernels[102].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = 
args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_4_front_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_4_front_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_4_front_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_4_front_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_minus_4_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_minus_4_front_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_minus_4_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_minus_4_front_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_minus_4_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_minus_4_front_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_minus_4_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_minus_4_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_minus_4_front<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[102].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; OPS_kernels[102].transfer += 
ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
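The OPS_ACC0/OPS_ACC1 macros above linearize a relative (x, y, z) offset into a flat array index using the per-dataset pitches held in the __constant__ xdim/ydim symbols. A minimal standalone sketch of that addressing scheme (xdim and ydim stand in for the xdim0_.../ydim0_... constants; the helper name is illustrative):

// Sketch: flat index for a relative (x, y, z) offset, as computed by OPS_ACC0/OPS_ACC1.
__host__ __device__ inline int ops_flat_index(int x, int y, int z, int xdim, int ydim) {
    return x + xdim * y + xdim * ydim * z;
}
// Example from the kernel body: zvel0[OPS_ACC0(0, 0, -4)] reads the element four
// z-planes behind the current point, i.e. at offset -4 * xdim * ydim from the
// pointer that was already advanced to (idx_x, idx_y, idx_z).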
13c670158f8866c41b579b2d350fdc21e48cbeef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sub.hpp" #include <hip/hip_complex.h> #include "utility.hpp" #include "helper_math.h" namespace csmri { //////////////////////////////////////////////////////////////////////////////////////////////////// #ifndef blockDimx #define blockDimx 16 #endif #ifndef blockDimy #define blockDimy 16 #endif #ifndef blockDimz #define blockDimz 1 #endif //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void __sub( float2* inA, float2* inB, float2* out, int dimx, int dimy, int dimz) { //3D global index int3 idx = make_int3( blockIdx.x*blockDim.x+threadIdx.x, blockIdx.y*blockDim.y+threadIdx.y, blockIdx.z*blockDim.z+threadIdx.z); //1D global index int index = idx.z*dimy*dimx + idx.y*dimx + idx.x; //Check valid indices if (idx.x >= dimx || idx.y >= dimy || idx.z >= dimz) return; //Do computing out[index] = cuCsubf(inA[index], inB[index]); } //////////////////////////////////////////////////////////////////////////////////////////////////// void sub( float2* inA, float2* inB, float2* out, int dimx, int dimy, int dimz) { dim3 numBlocks( (dimx/blockDimx + ((dimx%blockDimx)?1:0)), (dimy/blockDimy + ((dimy%blockDimy)?1:0)), (dimz/blockDimz + ((dimz%blockDimz)?1:0)) ); dim3 numThreads(blockDimx, blockDimy, blockDimz); hipLaunchKernelGGL(( __sub), dim3(numBlocks), dim3(numThreads), 0, 0, inA, inB, out, dimx, dimy, dimz); } //////////////////////////////////////////////////////////////////////////////////////////////////// }
13c670158f8866c41b579b2d350fdc21e48cbeef.cu
#include "sub.hpp" #include <cuComplex.h> #include "utility.hpp" #include "helper_math.h" namespace csmri { //////////////////////////////////////////////////////////////////////////////////////////////////// #ifndef blockDimx #define blockDimx 16 #endif #ifndef blockDimy #define blockDimy 16 #endif #ifndef blockDimz #define blockDimz 1 #endif //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void __sub( float2* inA, float2* inB, float2* out, int dimx, int dimy, int dimz) { //3D global index int3 idx = make_int3( blockIdx.x*blockDim.x+threadIdx.x, blockIdx.y*blockDim.y+threadIdx.y, blockIdx.z*blockDim.z+threadIdx.z); //1D global index int index = idx.z*dimy*dimx + idx.y*dimx + idx.x; //Check valid indices if (idx.x >= dimx || idx.y >= dimy || idx.z >= dimz) return; //Do computing out[index] = cuCsubf(inA[index], inB[index]); } //////////////////////////////////////////////////////////////////////////////////////////////////// void sub( float2* inA, float2* inB, float2* out, int dimx, int dimy, int dimz) { dim3 numBlocks( (dimx/blockDimx + ((dimx%blockDimx)?1:0)), (dimy/blockDimy + ((dimy%blockDimy)?1:0)), (dimz/blockDimz + ((dimz%blockDimz)?1:0)) ); dim3 numThreads(blockDimx, blockDimy, blockDimz); __sub<<<numBlocks, numThreads>>>(inA, inB, out, dimx, dimy, dimz); } //////////////////////////////////////////////////////////////////////////////////////////////////// }
8eddb7c22114d3137d261c27d3ecd546d62814d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> /* we need these includes for CUDA's random number stuff */ #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define MAX 100 /* this GPU kernel function calculates a random number and stores it in the parameter */ __global__ void random(unsigned int seed, int* result) { /* CUDA's random number library uses hiprandState_t to keep track of the seed value we will store a random state for every thread */ hiprandState_t state; /* we have to initialize the state */ hiprand_init(seed, /* the seed controls the sequence of random values that are produced */ 0, /* the sequence number is only important with multiple cores */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &state); /* hiprand works like rand - except that it takes a state as a parameter */ *result = hiprand(&state) % MAX; } int main() { /* allocate an int on the GPU */ int* gpu_x; hipMalloc((void**)&gpu_x, sizeof(int)); /* invoke the GPU to initialize all of the random states */ random<<<1, 1>>>(time(NULL), gpu_x); /* copy the random number back */ int x; hipMemcpy(&x, gpu_x, sizeof(int), hipMemcpyDeviceToHost); printf("Random number = %d.\n", x); /* free the memory we allocated */ hipFree(gpu_x); return 0; }
8eddb7c22114d3137d261c27d3ecd546d62814d3.cu
#include <stdio.h> #include <time.h> /* we need these includes for CUDA's random number stuff */ #include <curand.h> #include <curand_kernel.h> #define MAX 100 /* this GPU kernel function calculates a random number and stores it in the parameter */ __global__ void random(unsigned int seed, int* result) { /* CUDA's random number library uses curandState_t to keep track of the seed value we will store a random state for every thread */ curandState_t state; /* we have to initialize the state */ curand_init(seed, /* the seed controls the sequence of random values that are produced */ 0, /* the sequence number is only important with multiple cores */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &state); /* curand works like rand - except that it takes a state as a parameter */ *result = curand(&state) % MAX; } int main() { /* allocate an int on the GPU */ int* gpu_x; cudaMalloc((void**)&gpu_x, sizeof(int)); /* invoke the GPU to initialize all of the random states */ random<<<1, 1>>>(time(NULL), gpu_x); /* copy the random number back */ int x; cudaMemcpy(&x, gpu_x, sizeof(int), cudaMemcpyDeviceToHost); printf("Random number = %d.\n", x); /* free the memory we allocated */ cudaFree(gpu_x); return 0; }
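The comment inside the kernel above notes that a random state would normally be kept per thread; a minimal sketch of that pattern (the kernel name, buffer names and the launch geometry are illustrative, not from the original file):

// Sketch: one curandState_t per thread, same seed but distinct sequence numbers
// so each thread draws from an independent stream.
__global__ void random_many(unsigned int seed, curandState_t* states, int* results, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    curand_init(seed, tid /* sequence = thread id */, 0, &states[tid]);
    results[tid] = curand(&states[tid]) % MAX;
}
// Launch sketch: random_many<<<(n + 255) / 256, 256>>>(time(NULL), d_states, d_results, n);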
88a32a31d7941622bb09682e22a533302f3428d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Using CPU + GPU heterogeneous programming to implement PCA algorithm on Minst * Capitalize the first letter to indicate the data on the GPU(device) */ #include "stdio.h" #include "../include/binary_IO.hpp" #include "../include/bitmap_IO.hpp" #include "../include/hpc_helpers.hpp" #define H2D (hipMemcpyHostToDevice) #define D2H (hipMemcpyDeviceToHost) #define COALESCED_ACCESS template < typename index_t, typename value_t> __global__ void compute_mean_kernel( value_t * Data, value_t * Mean, index_t num_entries, index_t num_features) { // Compute global thread identifier auto thid = blockDim.x*blockIdx.x + threadIdx.x; // Prevent memory access violations if( thid < num_features){ // accumulate in a fast register // not in slow global memory value_t accum = 0; // try unrolling the loop with // # pragma unroll 32 // for some additional performance for(index_t entry =0;entry < num_entries ;entry++){ accum += Data[entry*num_features + thid]; } // Write the register once to global memory Mean[thid] = accum / num_entries; } } template< typename index_t, typename value_t> __global__ void correction_kernel( value_t *Data, value_t *Mean, index_t num_entries, index_t num_features){ auto thid = blockDim.x*blockIdx.x + threadIdx.x; if(thid < num_features){ const value_t value = Mean[thid]; for(index_t entry =0;entry < num_entries;entry++){ Data[entry*num_features + thid] -= value; } } } template< typename index_t, typename value_t> __global__ void correction_kernel_ortho( value_t *Data, value_t *Mean, index_t num_entries, index_t num_features){ auto thid = blockDim.x*blockIdx.x + threadIdx.x; if(thid < num_features){ const value_t value = Mean[thid]; for(index_t feat =0;feat < num_entries;feat++){ Data[thid*num_features + feat] -= Mean[feat]; } } } template< typename index_t, typename value_t, uint32_t chunk_size = 32> __global__ void shared_covariance_kernel( value_t * Data, value_t * Cov, index_t num_entries, index_t num_features){ // first index in a window of width chunk size const index_t base_x = blockIdx.x * chunk_size; const index_t base_y = blockIdx.y * chunk_size; //local const index_t thid_y = threadIdx.y; const index_t thid_x = threadIdx.x; // global thread identifiers const index_t x = base_x + thid_x; const index_t y = base_y + thid_y; // optional early exit for tiles above the diagonal if(base_x > base_y) return; // allocate shared memory __shared__ value_t cache_x[chunk_size][chunk_size]; __shared__ value_t cache_y[chunk_size][chunk_size]; // compute the number of chunks to be computed const index_t num_chunks = SDIV(num_entries,chunk_size); // accumulated calue of scalar product value_t accum =0; // for each chunk for (index_t chunk = 0;chunk < num_chunks;chunk++){ //assign thread IDs to rows and columns const index_t row = thid_y + chunk*chunk_size; const index_t col_x = thid_x + base_x; const index_t col_y = thid_x + base_y; // check ig valid row or column indices const bool valid_row = row < num_entries; const bool valid_col_x = col_x < num_features; const bool valid_col_y = col_y < num_features; cache_x[thid_y][thid_x] = valid_row*valid_col_x?Data[row*num_features + col_x] : 0; cache_y[thid_y][thid_x] = valid_row*valid_col_y?Data[row*num_features + col_y] : 0; // Ensure all threads hava finished writing to shared memory __syncthreads(); // optional early exit if(x <= y){ // evaluate the scalar product for (index_t entry=0;entry<chunk_size;entry++){ accum += cache_y[entry][thid_y] * 
cache_x[entry][thid_x]; } } // ensure shared memory safety be overwritten __syncthreads(); } if( y < num_features && x<=y){ Cov[y*num_features + x] = Cov[x*num_features + y] = accum /num_entries; } } int main(int argc,char * argv[]){ // set the id of cuda device hipSetDevice(0); // 202599 grayscale images each of shape 55 x 45 constexpr uint64_t imgs = 202599, rows = 55, cols = 45; // pointer for data matrix and mean vector float * data =nullptr,* cov= nullptr; hipHostMalloc(&data,sizeof(float)*imgs*rows*cols); hipHostMalloc(&cov,sizeof(float)*rows*cols); //allocate storage on GPU float *Data = nullptr,*Mean = nullptr ,* Cov = nullptr; hipMalloc(&Data,sizeof(float)*imgs*rows*cols); hipMalloc(&Mean,sizeof(float)*rows*cols); hipMalloc(&Cov,sizeof(float)*rows*cols*rows*cols); //load data matrix from disk TIMERSTART(read_data_from_disk); std::string file_name = "./data/celebA_gray_lowres.202599_55_45_32.bin"; load_binary(data,imgs*rows*cols,file_name); TIMERSTOP(read_data_from_disk); // copy data to device and reset Mean TIMERSTART(data_H2D); hipMemcpy(Data,data,sizeof(float)*imgs*rows*cols,hipMemcpyHostToDevice); hipMemset(Mean,0,sizeof(float)*rows*cols); TIMERSTOP(data_H2D); //compute mean TIMERSTART(compute_mean_kernel); hipLaunchKernelGGL(( compute_mean_kernel), dim3(SDIV(rows*cols, 32)), dim3(32), 0, 0, Data, Mean, imgs, rows*cols); TIMERSTOP(compute_mean_kernel); // correct mean TIMERSTART(correction_kernel) #ifdef COALESCED_ACCESS hipLaunchKernelGGL(( correction_kernel), dim3(SDIV(rows*cols, 32)), dim3(32), 0, 0, Data, Mean, imgs, rows*cols); #else hipLaunchKernelGGL(( correction_kernel_ortho), dim3(SDIV(imgs, 32)), dim3(32), 0, 0, Data, Mean, imgs, rows*cols); #endif TIMERSTOP(correction_kernel) // compute covariance matrix TIMERSTART(covariance_kernel) dim3 blocks(SDIV(rows*cols, 32), SDIV(rows*cols, 32)); dim3 threads(32, 32, 1); hipLaunchKernelGGL(( shared_covariance_kernel), dim3(blocks), dim3(threads), 0, 0, Data, Cov, imgs, rows*cols); TIMERSTOP(covariance_kernel) // Transfer mean back to host TIMERSTART(cov_D2H) hipMemcpy(cov, Cov, sizeof(float)*rows*cols*rows*cols, hipMemcpyDeviceToHost); TIMERSTOP(cov_D2H) // Write mean image to disk TIMERSTART(write_mean_images_to_disk); dump_bitmap(cov,rows,cols,"./imgs/celebA_cov.bmp"); TIMERSTOP(write_mean_images_to_disk); // synchronize the GPU preventing premature termination hipDeviceSynchronize(); // Get rid of the memory hipHostFree(data); hipHostFree(cov); hipFree(Data); hipFree(Mean); hipFree(Cov); return 0; }
88a32a31d7941622bb09682e22a533302f3428d2.cu
/* * Using CPU + GPU heterogeneous programming to implement PCA algorithm on Minst * Capitalize the first letter to indicate the data on the GPU(device) */ #include "stdio.h" #include "../include/binary_IO.hpp" #include "../include/bitmap_IO.hpp" #include "../include/hpc_helpers.hpp" #define H2D (cudaMemcpyHostToDevice) #define D2H (cudaMemcpyDeviceToHost) #define COALESCED_ACCESS template < typename index_t, typename value_t> __global__ void compute_mean_kernel( value_t * Data, value_t * Mean, index_t num_entries, index_t num_features) { // Compute global thread identifier auto thid = blockDim.x*blockIdx.x + threadIdx.x; // Prevent memory access violations if( thid < num_features){ // accumulate in a fast register // not in slow global memory value_t accum = 0; // try unrolling the loop with // # pragma unroll 32 // for some additional performance for(index_t entry =0;entry < num_entries ;entry++){ accum += Data[entry*num_features + thid]; } // Write the register once to global memory Mean[thid] = accum / num_entries; } } template< typename index_t, typename value_t> __global__ void correction_kernel( value_t *Data, value_t *Mean, index_t num_entries, index_t num_features){ auto thid = blockDim.x*blockIdx.x + threadIdx.x; if(thid < num_features){ const value_t value = Mean[thid]; for(index_t entry =0;entry < num_entries;entry++){ Data[entry*num_features + thid] -= value; } } } template< typename index_t, typename value_t> __global__ void correction_kernel_ortho( value_t *Data, value_t *Mean, index_t num_entries, index_t num_features){ auto thid = blockDim.x*blockIdx.x + threadIdx.x; if(thid < num_features){ const value_t value = Mean[thid]; for(index_t feat =0;feat < num_entries;feat++){ Data[thid*num_features + feat] -= Mean[feat]; } } } template< typename index_t, typename value_t, uint32_t chunk_size = 32> __global__ void shared_covariance_kernel( value_t * Data, value_t * Cov, index_t num_entries, index_t num_features){ // first index in a window of width chunk size const index_t base_x = blockIdx.x * chunk_size; const index_t base_y = blockIdx.y * chunk_size; //local const index_t thid_y = threadIdx.y; const index_t thid_x = threadIdx.x; // global thread identifiers const index_t x = base_x + thid_x; const index_t y = base_y + thid_y; // optional early exit for tiles above the diagonal if(base_x > base_y) return; // allocate shared memory __shared__ value_t cache_x[chunk_size][chunk_size]; __shared__ value_t cache_y[chunk_size][chunk_size]; // compute the number of chunks to be computed const index_t num_chunks = SDIV(num_entries,chunk_size); // accumulated calue of scalar product value_t accum =0; // for each chunk for (index_t chunk = 0;chunk < num_chunks;chunk++){ //assign thread IDs to rows and columns const index_t row = thid_y + chunk*chunk_size; const index_t col_x = thid_x + base_x; const index_t col_y = thid_x + base_y; // check ig valid row or column indices const bool valid_row = row < num_entries; const bool valid_col_x = col_x < num_features; const bool valid_col_y = col_y < num_features; cache_x[thid_y][thid_x] = valid_row*valid_col_x?Data[row*num_features + col_x] : 0; cache_y[thid_y][thid_x] = valid_row*valid_col_y?Data[row*num_features + col_y] : 0; // Ensure all threads hava finished writing to shared memory __syncthreads(); // optional early exit if(x <= y){ // evaluate the scalar product for (index_t entry=0;entry<chunk_size;entry++){ accum += cache_y[entry][thid_y] * cache_x[entry][thid_x]; } } // ensure shared memory safety be overwritten __syncthreads(); } if( 
y < num_features && x<=y){ Cov[y*num_features + x] = Cov[x*num_features + y] = accum /num_entries; } } int main(int argc,char * argv[]){ // set the id of cuda device cudaSetDevice(0); // 202599 grayscale images each of shape 55 x 45 constexpr uint64_t imgs = 202599, rows = 55, cols = 45; // pointer for data matrix and mean vector float * data =nullptr,* cov= nullptr; cudaMallocHost(&data,sizeof(float)*imgs*rows*cols); cudaMallocHost(&cov,sizeof(float)*rows*cols); //allocate storage on GPU float *Data = nullptr,*Mean = nullptr ,* Cov = nullptr; cudaMalloc(&Data,sizeof(float)*imgs*rows*cols); cudaMalloc(&Mean,sizeof(float)*rows*cols); cudaMalloc(&Cov,sizeof(float)*rows*cols*rows*cols); //load data matrix from disk TIMERSTART(read_data_from_disk); std::string file_name = "./data/celebA_gray_lowres.202599_55_45_32.bin"; load_binary(data,imgs*rows*cols,file_name); TIMERSTOP(read_data_from_disk); // copy data to device and reset Mean TIMERSTART(data_H2D); cudaMemcpy(Data,data,sizeof(float)*imgs*rows*cols,cudaMemcpyHostToDevice); cudaMemset(Mean,0,sizeof(float)*rows*cols); TIMERSTOP(data_H2D); //compute mean TIMERSTART(compute_mean_kernel); compute_mean_kernel<<<SDIV(rows*cols, 32), 32>>> (Data, Mean, imgs, rows*cols); TIMERSTOP(compute_mean_kernel); // correct mean TIMERSTART(correction_kernel) #ifdef COALESCED_ACCESS correction_kernel<<<SDIV(rows*cols, 32), 32>>> (Data, Mean, imgs, rows*cols); #else correction_kernel_ortho<<<SDIV(imgs, 32), 32>>> (Data, Mean, imgs, rows*cols); #endif TIMERSTOP(correction_kernel) // compute covariance matrix TIMERSTART(covariance_kernel) dim3 blocks(SDIV(rows*cols, 32), SDIV(rows*cols, 32)); dim3 threads(32, 32, 1); shared_covariance_kernel<<<blocks, threads>>> (Data, Cov, imgs, rows*cols); TIMERSTOP(covariance_kernel) // Transfer mean back to host TIMERSTART(cov_D2H) cudaMemcpy(cov, Cov, sizeof(float)*rows*cols*rows*cols, cudaMemcpyDeviceToHost); TIMERSTOP(cov_D2H) // Write mean image to disk TIMERSTART(write_mean_images_to_disk); dump_bitmap(cov,rows,cols,"./imgs/celebA_cov.bmp"); TIMERSTOP(write_mean_images_to_disk); // synchronize the GPU preventing premature termination cudaDeviceSynchronize(); // Get rid of the memory cudaFreeHost(data); cudaFreeHost(cov); cudaFree(Data); cudaFree(Mean); cudaFree(Cov); return 0; }
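The tiled shared-memory covariance kernel above is easy to mis-index, so a slow host reference helps when validating a few entries. A minimal sketch, assuming row-major data with num_entries rows and num_features columns as in the kernels (the function name is illustrative):

// Sketch: CPU reference for one covariance entry, mirroring the GPU pipeline:
// subtract the per-feature mean, then Cov[x][y] = (1/N) * sum_i D[i][x] * D[i][y].
float covariance_ref(const float* data, int num_entries, int num_features, int x, int y) {
    double mean_x = 0.0, mean_y = 0.0;
    for (int i = 0; i < num_entries; ++i) {
        mean_x += data[i * num_features + x];
        mean_y += data[i * num_features + y];
    }
    mean_x /= num_entries;
    mean_y /= num_entries;
    double acc = 0.0;
    for (int i = 0; i < num_entries; ++i)
        acc += (data[i * num_features + x] - mean_x) * (data[i * num_features + y] - mean_y);
    return float(acc / num_entries);
}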
0c3c6012e519ffdd2c946d4995adc19f6594ae23.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "MAC_Grid_3D.cuh" __global__ void setFluidIndex(VolumeCollection volumes, int cellCount, unsigned int* fluidCount, int sizeX, int sizeY, int sizeZ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if (x > sizeX || y > sizeY || z > sizeZ) return; if (volumes.content.readSurface<int>(x,y,z) == CONTENT_FLUID) { int thisIndex = atomicInc(fluidCount, cellCount); volumes.content.writeSurface<int>(thisIndex, x, y, z); } } __global__ void writeSpeed(VolumeCollection volumes, int cellCount, float* speedX, float* speedY, float* speedZ,int sizeX, int sizeY, int sizeZ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if (x > sizeX || y > sizeY || z > sizeZ) return; float4 velocity = volumes.velocity.readSurface<float4>(x, y, z); get3D(speedX, x, y, z) = abs(velocity.x); get3D(speedY, x, y, z) = abs(velocity.y); get3D(speedZ, x, y, z) = abs(velocity.z); } MAC_Grid_3D::MAC_Grid_3D(int X, int Y, int Z, float cellPhysicalSize_) : sizeX(X), sizeY(Y), sizeZ(Z), cellCount((X + 1)* (Y + 1)* (Z + 1)), cellPhysicalSize(cellPhysicalSize_), physicalSizeX(X* cellPhysicalSize), physicalSizeY(Y* cellPhysicalSize) { cudaGridSize = dim3(divUp(sizeX+1, cudaBlockSize.x), divUp(sizeY+1, cudaBlockSize.y), divUp(sizeZ+1, cudaBlockSize.z)); std::cout << "cudaGridSize " << cudaGridSize.x << " " << cudaGridSize.y << " " << cudaGridSize.z << std::endl; volumes.content = createField3D<int>(sizeX + 1, sizeY + 1, sizeZ + 1, cudaGridSize,cudaBlockSize,CONTENT_AIR,false); volumes.pressure = createField3D<float>(sizeX , sizeY, sizeZ , cudaGridSize, cudaBlockSize, 0.f,false); volumes.fluidIndex = createField3D<int>(sizeX , sizeY , sizeZ , cudaGridSize, cudaBlockSize, 0, false); volumes.divergence = createField3D<float>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, 0.f, false); volumes.particleCount = createField3D<int>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, 0, false); volumes.velocityAccumWeight = createField3D<float4>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, make_float4(0,0,0,0), false); volumes.hasVelocity = createField3D<int4>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, make_int4(0, 0, 0, 0), false); volumes.velocity = createField3D<float4>(sizeX+1, sizeY+1, sizeZ+1, cudaGridSize, cudaBlockSize, make_float4(0, 0, 0, 0), true); volumes.newVelocity = createField3D<float4>(sizeX + 1, sizeY + 1, sizeZ + 1, cudaGridSize, cudaBlockSize, make_float4(0, 0, 0, 0), true); volumes.volumeFractions = createField3D<float4>(sizeX , sizeY , sizeZ , cudaGridSize, cudaBlockSize, make_float4(0, 0, 0, 0), false); volumes.newVolumeFractions = createField3D<float4>(sizeX , sizeY , sizeZ , cudaGridSize, cudaBlockSize, make_float4(0, 0, 0, 0), false); volumes.density = createField3D<float>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, 0.f, false); updateFluidCount(); } void MAC_Grid_3D::updateFluidCount() { unsigned int* fluidCountDevice; HANDLE_ERROR(hipMalloc(&fluidCountDevice, sizeof(*fluidCountDevice))); HANDLE_ERROR(hipMemset(fluidCountDevice, 0,sizeof(*fluidCountDevice))); hipLaunchKernelGGL(( setFluidIndex) , dim3(cudaGridSize),dim3(cudaBlockSize), 0, 0, volumes, cellCount, fluidCountDevice,sizeX,sizeY,sizeZ); CHECK_CUDA_ERROR("set fluid index"); HANDLE_ERROR(hipMemcpy(&fluidCount, fluidCountDevice, sizeof(fluidCount), 
hipMemcpyDeviceToHost)); std::cout << "current fluid cell count: " << fluidCount << std::endl; HANDLE_ERROR(hipFree(fluidCountDevice)); } float MAC_Grid_3D::getMaxSpeed() { float* speedX; float* speedY; float* speedZ; HANDLE_ERROR(hipMalloc(&speedX, cellCount * sizeof(*speedX))); HANDLE_ERROR(hipMalloc(&speedY, cellCount * sizeof(*speedY))); HANDLE_ERROR(hipMalloc(&speedZ, cellCount * sizeof(*speedZ))); writeSpeed <<<cudaGridSize, cudaBlockSize >> > (volumes, cellCount, speedX,speedY,speedZ,sizeX,sizeY,sizeZ); CHECK_CUDA_ERROR("write speed"); float maxX = thrust::reduce(thrust::device, speedX, speedX + cellCount, 0, thrust::maximum<float>()); float maxY = thrust::reduce(thrust::device, speedY, speedY + cellCount, 0, thrust::maximum<float>()); float maxZ = thrust::reduce(thrust::device, speedZ, speedZ + cellCount, 0, thrust::maximum<float>()); float maxSpeed = max(max(maxX, maxY), maxZ) * sqrt(3); HANDLE_ERROR(hipFree(speedX)); HANDLE_ERROR(hipFree(speedY)); HANDLE_ERROR(hipFree(speedZ)); return maxSpeed; } MAC_Grid_3D::~MAC_Grid_3D() { releaseField3D(volumes. content); releaseField3D(volumes. pressure); releaseField3D(volumes. fluidIndex); releaseField3D(volumes. divergence); releaseField3D(volumes. particleCount); releaseField3D(volumes. velocityAccumWeight); releaseField3D(volumes. hasVelocity); releaseField3D(volumes. velocity); releaseField3D(volumes. newVelocity); releaseField3D(volumes. volumeFractions); releaseField3D(volumes. newVolumeFractions); }
0c3c6012e519ffdd2c946d4995adc19f6594ae23.cu
#include "MAC_Grid_3D.cuh" __global__ void setFluidIndex(VolumeCollection volumes, int cellCount, unsigned int* fluidCount, int sizeX, int sizeY, int sizeZ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if (x > sizeX || y > sizeY || z > sizeZ) return; if (volumes.content.readSurface<int>(x,y,z) == CONTENT_FLUID) { int thisIndex = atomicInc(fluidCount, cellCount); volumes.content.writeSurface<int>(thisIndex, x, y, z); } } __global__ void writeSpeed(VolumeCollection volumes, int cellCount, float* speedX, float* speedY, float* speedZ,int sizeX, int sizeY, int sizeZ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if (x > sizeX || y > sizeY || z > sizeZ) return; float4 velocity = volumes.velocity.readSurface<float4>(x, y, z); get3D(speedX, x, y, z) = abs(velocity.x); get3D(speedY, x, y, z) = abs(velocity.y); get3D(speedZ, x, y, z) = abs(velocity.z); } MAC_Grid_3D::MAC_Grid_3D(int X, int Y, int Z, float cellPhysicalSize_) : sizeX(X), sizeY(Y), sizeZ(Z), cellCount((X + 1)* (Y + 1)* (Z + 1)), cellPhysicalSize(cellPhysicalSize_), physicalSizeX(X* cellPhysicalSize), physicalSizeY(Y* cellPhysicalSize) { cudaGridSize = dim3(divUp(sizeX+1, cudaBlockSize.x), divUp(sizeY+1, cudaBlockSize.y), divUp(sizeZ+1, cudaBlockSize.z)); std::cout << "cudaGridSize " << cudaGridSize.x << " " << cudaGridSize.y << " " << cudaGridSize.z << std::endl; volumes.content = createField3D<int>(sizeX + 1, sizeY + 1, sizeZ + 1, cudaGridSize,cudaBlockSize,CONTENT_AIR,false); volumes.pressure = createField3D<float>(sizeX , sizeY, sizeZ , cudaGridSize, cudaBlockSize, 0.f,false); volumes.fluidIndex = createField3D<int>(sizeX , sizeY , sizeZ , cudaGridSize, cudaBlockSize, 0, false); volumes.divergence = createField3D<float>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, 0.f, false); volumes.particleCount = createField3D<int>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, 0, false); volumes.velocityAccumWeight = createField3D<float4>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, make_float4(0,0,0,0), false); volumes.hasVelocity = createField3D<int4>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, make_int4(0, 0, 0, 0), false); volumes.velocity = createField3D<float4>(sizeX+1, sizeY+1, sizeZ+1, cudaGridSize, cudaBlockSize, make_float4(0, 0, 0, 0), true); volumes.newVelocity = createField3D<float4>(sizeX + 1, sizeY + 1, sizeZ + 1, cudaGridSize, cudaBlockSize, make_float4(0, 0, 0, 0), true); volumes.volumeFractions = createField3D<float4>(sizeX , sizeY , sizeZ , cudaGridSize, cudaBlockSize, make_float4(0, 0, 0, 0), false); volumes.newVolumeFractions = createField3D<float4>(sizeX , sizeY , sizeZ , cudaGridSize, cudaBlockSize, make_float4(0, 0, 0, 0), false); volumes.density = createField3D<float>(sizeX, sizeY, sizeZ, cudaGridSize, cudaBlockSize, 0.f, false); updateFluidCount(); } void MAC_Grid_3D::updateFluidCount() { unsigned int* fluidCountDevice; HANDLE_ERROR(cudaMalloc(&fluidCountDevice, sizeof(*fluidCountDevice))); HANDLE_ERROR(cudaMemset(fluidCountDevice, 0,sizeof(*fluidCountDevice))); setFluidIndex <<<cudaGridSize,cudaBlockSize>>> (volumes, cellCount, fluidCountDevice,sizeX,sizeY,sizeZ); CHECK_CUDA_ERROR("set fluid index"); HANDLE_ERROR(cudaMemcpy(&fluidCount, fluidCountDevice, sizeof(fluidCount), cudaMemcpyDeviceToHost)); std::cout << "current fluid cell count: " << fluidCount << std::endl; 
HANDLE_ERROR(cudaFree(fluidCountDevice)); } float MAC_Grid_3D::getMaxSpeed() { float* speedX; float* speedY; float* speedZ; HANDLE_ERROR(cudaMalloc(&speedX, cellCount * sizeof(*speedX))); HANDLE_ERROR(cudaMalloc(&speedY, cellCount * sizeof(*speedY))); HANDLE_ERROR(cudaMalloc(&speedZ, cellCount * sizeof(*speedZ))); writeSpeed <<<cudaGridSize, cudaBlockSize >> > (volumes, cellCount, speedX,speedY,speedZ,sizeX,sizeY,sizeZ); CHECK_CUDA_ERROR("write speed"); float maxX = thrust::reduce(thrust::device, speedX, speedX + cellCount, 0, thrust::maximum<float>()); float maxY = thrust::reduce(thrust::device, speedY, speedY + cellCount, 0, thrust::maximum<float>()); float maxZ = thrust::reduce(thrust::device, speedZ, speedZ + cellCount, 0, thrust::maximum<float>()); float maxSpeed = max(max(maxX, maxY), maxZ) * sqrt(3); HANDLE_ERROR(cudaFree(speedX)); HANDLE_ERROR(cudaFree(speedY)); HANDLE_ERROR(cudaFree(speedZ)); return maxSpeed; } MAC_Grid_3D::~MAC_Grid_3D() { releaseField3D(volumes. content); releaseField3D(volumes. pressure); releaseField3D(volumes. fluidIndex); releaseField3D(volumes. divergence); releaseField3D(volumes. particleCount); releaseField3D(volumes. velocityAccumWeight); releaseField3D(volumes. hasVelocity); releaseField3D(volumes. velocity); releaseField3D(volumes. newVelocity); releaseField3D(volumes. volumeFractions); releaseField3D(volumes. newVolumeFractions); }
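getMaxSpeed() above folds each per-cell speed array down to a scalar with thrust::reduce and thrust::maximum; a minimal standalone sketch of that reduction pattern (function and buffer names are illustrative):

#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>

// Sketch: maximum of n floats already resident in device memory.
float device_max(const float* d_values, int n) {
    // 0.0f is a safe initial value here because writeSpeed stores absolute components.
    return thrust::reduce(thrust::device, d_values, d_values + n, 0.0f, thrust::maximum<float>());
}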
5ef3f43dc0b7a823b67b0e8746d1d2935be3d2e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> __global__ void star(char *a, int *n) { int i, j, t; int k = *n; int tid=threadIdx.x; j = k - tid - 1; t = (tid + 1) * 2 - 1; for(i = 0; i < t; i++){ a[tid * (k * 2 - 1) + j + i] = '*'; } } int main(void) { int i, N, j; scanf("%d", &N); int l = 2 * N - 1; char a[l][l]; char *d_a; int *d_b; int size=sizeof(char); int s = sizeof(int); hipMalloc((void **)&d_a,size*l*l); hipMalloc((void **)&d_b,s); hipMemcpy(d_a,&a,size*l*l,hipMemcpyHostToDevice); hipMemcpy(d_b,&N,s,hipMemcpyHostToDevice); hipLaunchKernelGGL(( star), dim3(1),dim3(N), 0, 0, d_a,d_b); hipMemcpy(a,d_a,size*l*l,hipMemcpyDeviceToHost); for(i=0;i<l;i++) { for(j = 0; j < l; j++){ if(a[i][j] == '*'){ printf("%c", a[i][j]); }else{ printf(" "); } } printf("\n"); } return 0; }
5ef3f43dc0b7a823b67b0e8746d1d2935be3d2e2.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> __global__ void star(char *a, int *n) { int i, j, t; int k = *n; int tid=threadIdx.x; j = k - tid - 1; t = (tid + 1) * 2 - 1; for(i = 0; i < t; i++){ a[tid * (k * 2 - 1) + j + i] = '*'; } } int main(void) { int i, N, j; scanf("%d", &N); int l = 2 * N - 1; char a[l][l]; char *d_a; int *d_b; int size=sizeof(char); int s = sizeof(int); cudaMalloc((void **)&d_a,size*l*l); cudaMalloc((void **)&d_b,s); cudaMemcpy(d_a,&a,size*l*l,cudaMemcpyHostToDevice); cudaMemcpy(d_b,&N,s,cudaMemcpyHostToDevice); star<<<1,N>>>(d_a,d_b); cudaMemcpy(a,d_a,size*l*l,cudaMemcpyDeviceToHost); for(i=0;i<l;i++) { for(j = 0; j < l; j++){ if(a[i][j] == '*'){ printf("%c", a[i][j]); }else{ printf(" "); } } printf("\n"); } return 0; }
996be97a7fda2328fee32d032e420cea6bcb3a2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void add( int *a, int *b, int *c ) { int tid = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (tid < vector_size){ c[tid] = a[tid] + b[tid]; // add vectors together } }
996be97a7fda2328fee32d032e420cea6bcb3a2d.cu
#include "includes.h" __global__ void add( int *a, int *b, int *c ) { int tid = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (tid < vector_size){ c[tid] = a[tid] + b[tid]; // add vectors together } }
f974e05f127e6db9282aeca4f046879b7fb85e21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <conf.h> #include "inc/conf.h" #include "inc/def.h" #include "utils/msg.h" #include "d/q.h" #include "d/ker.h" #include "d/api.h" #include "utils/cc.h" #include "inc/type.h" #include "inc/dev.h" #include "utils/kl.h" #include "imp.h" enum { MAXTHREADS = 1024, WARPSIZE = 32 }; static const float MINV = -100000000.; static const float MAXV = 100000000.; typedef struct { int g_block_id; int g_blockcnt; float3 minval; float3 maxval; } sblockds_t; __global__ void minmaxob(const Particle * const d_data, float3 *d_min, float3 *d_max, int size) { __shared__ float3 mintemp[32]; __shared__ float3 maxtemp[32]; __shared__ float shrtmp[3][MAXTHREADS]; float3 mintemp1, maxtemp1; float3 mindef, maxdef; float temp2; mindef.x=MAXV; mindef.y=MAXV; mindef.z=MAXV; maxdef.x=MINV; maxdef.y=MINV; maxdef.z=MINV; __syncthreads(); int tid = threadIdx.x; int xyz; for(int i=tid; i<3*blockDim.x; i+=blockDim.x) { xyz=i%3; shrtmp[xyz][i/3] = (i/3<size)?d_data[i/3+blockIdx.x*size].r[xyz]:MINV; } __syncthreads(); mintemp1.x = (tid<size)?shrtmp[0][tid]:MAXV; mintemp1.y = (tid<size)?shrtmp[1][tid]:MAXV; mintemp1.z = (tid<size)?shrtmp[2][tid]:MAXV; maxtemp1.x = (tid<size)?shrtmp[0][tid]:MINV; maxtemp1.y = (tid<size)?shrtmp[1][tid]:MINV; maxtemp1.z = (tid<size)?shrtmp[2][tid]:MINV; for (int d=1; d<32; d<<=1) { temp2 = __shfl_up(mintemp1.x,d); mintemp1.x=(mintemp1.x>temp2)?temp2:mintemp1.x; temp2 = __shfl_up(mintemp1.y,d); mintemp1.y=(mintemp1.y>temp2)?temp2:mintemp1.y; temp2 = __shfl_up(mintemp1.z,d); mintemp1.z=(mintemp1.z>temp2)?temp2:mintemp1.z; temp2 = __shfl_up(maxtemp1.x,d); maxtemp1.x=(maxtemp1.x<temp2)?temp2:maxtemp1.x; temp2 = __shfl_up(maxtemp1.y,d); maxtemp1.y=(maxtemp1.y<temp2)?temp2:maxtemp1.y; temp2 = __shfl_up(maxtemp1.z,d); maxtemp1.z=(maxtemp1.z<temp2)?temp2:maxtemp1.z; } if (tid%32 == 31) { mintemp[tid/32] = mintemp1; maxtemp[tid/32] = maxtemp1; } __syncthreads(); if (threadIdx.x < 32) { mintemp1= (tid < blockDim.x/32)?mintemp[threadIdx.x]:mindef; maxtemp1= (tid < blockDim.x/32)?maxtemp[threadIdx.x]:maxdef; for (int d=1; d<32; d<<=1) { temp2 = __shfl_up(mintemp1.x,d); mintemp1.x=(mintemp1.x>temp2)?temp2:mintemp1.x; temp2 = __shfl_up(mintemp1.y,d); mintemp1.y=(mintemp1.y>temp2)?temp2:mintemp1.y; temp2 = __shfl_up(mintemp1.z,d); mintemp1.z=(mintemp1.z>temp2)?temp2:mintemp1.z; temp2 = __shfl_up(maxtemp1.x,d); maxtemp1.x=(maxtemp1.x<temp2)?temp2:maxtemp1.x; temp2 = __shfl_up(maxtemp1.y,d); maxtemp1.y=(maxtemp1.y<temp2)?temp2:maxtemp1.y; temp2 = __shfl_up(maxtemp1.z,d); maxtemp1.z=(maxtemp1.z<temp2)?temp2:maxtemp1.z; } if (tid < blockDim.x/32) { mintemp[tid] = mintemp1; maxtemp[tid] = maxtemp1; } } __syncthreads(); if (threadIdx.x==blockDim.x-1) { d_min[blockIdx.x]=mintemp[blockDim.x/32-1]; d_max[blockIdx.x]=maxtemp[blockDim.x/32-1]; } } __global__ void minmaxmba(const Particle *d_data, float3 *d_min, float3 *d_max, int size, sblockds_t *ptoblockds) { __shared__ float3 mintemp[32]; __shared__ float3 maxtemp[32]; __shared__ float shrtmp[3][MAXTHREADS]; __shared__ unsigned int my_blockId; const int which=blockIdx.x/((size+blockDim.x-1)/blockDim.x); /* which particle should manage */ float3 mintemp1, maxtemp1; float3 mindef, maxdef; float temp2; if (threadIdx.x==0) { my_blockId = atomicAdd( &(ptoblockds[which].g_block_id), 1 ); } mindef.x=MAXV; mindef.y=MAXV; mindef.z=MAXV; maxdef.x=MINV; maxdef.y=MINV; maxdef.z=MINV; __syncthreads(); int tid = threadIdx.x; int xyz; for(int i=tid; i<3*blockDim.x; 
i+=blockDim.x) { xyz=i%3; shrtmp[xyz][i/3] = (i/3+my_blockId*blockDim.x<size)?d_data[i/3+my_blockId*blockDim.x+which*size].r[xyz]:MINV; } __syncthreads(); mintemp1.x = (tid+my_blockId*blockDim.x<size)?shrtmp[0][tid]:MAXV; mintemp1.y = (tid+my_blockId*blockDim.x<size)?shrtmp[1][tid]:MAXV; mintemp1.z = (tid+my_blockId*blockDim.x<size)?shrtmp[2][tid]:MAXV; maxtemp1.x = (tid+my_blockId*blockDim.x<size)?shrtmp[0][tid]:MINV; maxtemp1.y = (tid+my_blockId*blockDim.x<size)?shrtmp[1][tid]:MINV; maxtemp1.z = (tid+my_blockId*blockDim.x<size)?shrtmp[2][tid]:MINV; for (int d=1; d<32; d<<=1) { temp2 = __shfl_up(mintemp1.x,d); mintemp1.x=(mintemp1.x>temp2)?temp2:mintemp1.x; temp2 = __shfl_up(mintemp1.y,d); mintemp1.y=(mintemp1.y>temp2)?temp2:mintemp1.y; temp2 = __shfl_up(mintemp1.z,d); mintemp1.z=(mintemp1.z>temp2)?temp2:mintemp1.z; temp2 = __shfl_up(maxtemp1.x,d); maxtemp1.x=(maxtemp1.x<temp2)?temp2:maxtemp1.x; temp2 = __shfl_up(maxtemp1.y,d); maxtemp1.y=(maxtemp1.y<temp2)?temp2:maxtemp1.y; temp2 = __shfl_up(maxtemp1.z,d); maxtemp1.z=(maxtemp1.z<temp2)?temp2:maxtemp1.z; } if (tid%32 == 31) { mintemp[tid/32] = mintemp1; maxtemp[tid/32] = maxtemp1; } __syncthreads(); if (threadIdx.x < 32) { mintemp1= (tid < blockDim.x/32)?mintemp[threadIdx.x]:mindef; maxtemp1= (tid < blockDim.x/32)?maxtemp[threadIdx.x]:maxdef; for (int d=1; d<32; d<<=1) { temp2 = __shfl_up(mintemp1.x,d); mintemp1.x=(mintemp1.x>temp2)?temp2:mintemp1.x; temp2 = __shfl_up(mintemp1.y,d); mintemp1.y=(mintemp1.y>temp2)?temp2:mintemp1.y; temp2 = __shfl_up(mintemp1.z,d); mintemp1.z=(mintemp1.z>temp2)?temp2:mintemp1.z; temp2 = __shfl_up(maxtemp1.x,d); maxtemp1.x=(maxtemp1.x<temp2)?temp2:maxtemp1.x; temp2 = __shfl_up(maxtemp1.y,d); maxtemp1.y=(maxtemp1.y<temp2)?temp2:maxtemp1.y; temp2 = __shfl_up(maxtemp1.z,d); maxtemp1.z=(maxtemp1.z<temp2)?temp2:maxtemp1.z; } if (tid < blockDim.x/32) { mintemp[tid] = mintemp1; maxtemp[tid] = maxtemp1; } } __syncthreads(); mintemp1=mintemp[blockDim.x/32-1]; maxtemp1=maxtemp[blockDim.x/32-1]; if (threadIdx.x==(blockDim.x-1)) { do {} while( atomicAdd(&(ptoblockds[which].g_blockcnt),0) < my_blockId ); mintemp1.x=(ptoblockds[which].minval.x<mintemp1.x)?ptoblockds[which].minval.x:mintemp1.x; maxtemp1.x=(ptoblockds[which].maxval.x>maxtemp1.x)?ptoblockds[which].maxval.x:maxtemp1.x; mintemp1.y=(ptoblockds[which].minval.y<mintemp1.y)?ptoblockds[which].minval.y:mintemp1.y; maxtemp1.y=(ptoblockds[which].maxval.y>maxtemp1.y)?ptoblockds[which].maxval.y:maxtemp1.y; mintemp1.z=(ptoblockds[which].minval.z<mintemp1.z)?ptoblockds[which].minval.z:mintemp1.z; maxtemp1.z=(ptoblockds[which].maxval.z>maxtemp1.z)?ptoblockds[which].maxval.z:maxtemp1.z; if(my_blockId==(((size+blockDim.x-1)/blockDim.x))-1) { /* it is the last block; reset for next iteration */ ptoblockds[which].minval=mindef; ptoblockds[which].maxval=maxdef; ptoblockds[which].g_blockcnt=0; ptoblockds[which].g_block_id=0; d_min[which]=mintemp1; d_max[which]=maxtemp1; } else { ptoblockds[which].minval=mintemp1; ptoblockds[which].maxval=maxtemp1; atomicAdd(&(ptoblockds[which].g_blockcnt),1); } } } void minmax(const Particle * const rbc, int size, int n, float3 *minrbc, float3 *maxrbc) { const int size32 = ((size + 31) / 32) * 32; if (size32 < MAXTHREADS) KL(minmaxob, (n, size32), (rbc, minrbc, maxrbc, size)); else { static int nctc = -1; static sblockds_t *ptoblockds = NULL; if( n > nctc) { sblockds_t * h_ptoblockds = new sblockds_t[n]; for(int i=0; i < n; i++) { h_ptoblockds[i].g_block_id=0; h_ptoblockds[i].g_blockcnt=0; h_ptoblockds[i].minval.x=MAXV; 
h_ptoblockds[i].maxval.x=MINV; h_ptoblockds[i].minval.y=MAXV; h_ptoblockds[i].maxval.y=MINV; h_ptoblockds[i].minval.z=MAXV; h_ptoblockds[i].maxval.z=MINV; } if (ptoblockds != NULL) CC(d::Free(ptoblockds)); CC(d::Malloc((void **)&ptoblockds,sizeof(sblockds_t) * n)); cH2D(ptoblockds, h_ptoblockds, n); delete [] h_ptoblockds; } int nblocks= n * ((size + MAXTHREADS - 1) / MAXTHREADS); KL(minmaxmba, (nblocks, MAXTHREADS), (rbc, minrbc, maxrbc, size, ptoblockds)); } }
f974e05f127e6db9282aeca4f046879b7fb85e21.cu
#include <stdio.h> #include <conf.h> #include "inc/conf.h" #include "inc/def.h" #include "utils/msg.h" #include "d/q.h" #include "d/ker.h" #include "d/api.h" #include "utils/cc.h" #include "inc/type.h" #include "inc/dev.h" #include "utils/kl.h" #include "imp.h" enum { MAXTHREADS = 1024, WARPSIZE = 32 }; static const float MINV = -100000000.; static const float MAXV = 100000000.; typedef struct { int g_block_id; int g_blockcnt; float3 minval; float3 maxval; } sblockds_t; __global__ void minmaxob(const Particle * const d_data, float3 *d_min, float3 *d_max, int size) { __shared__ float3 mintemp[32]; __shared__ float3 maxtemp[32]; __shared__ float shrtmp[3][MAXTHREADS]; float3 mintemp1, maxtemp1; float3 mindef, maxdef; float temp2; mindef.x=MAXV; mindef.y=MAXV; mindef.z=MAXV; maxdef.x=MINV; maxdef.y=MINV; maxdef.z=MINV; __syncthreads(); int tid = threadIdx.x; int xyz; for(int i=tid; i<3*blockDim.x; i+=blockDim.x) { xyz=i%3; shrtmp[xyz][i/3] = (i/3<size)?d_data[i/3+blockIdx.x*size].r[xyz]:MINV; } __syncthreads(); mintemp1.x = (tid<size)?shrtmp[0][tid]:MAXV; mintemp1.y = (tid<size)?shrtmp[1][tid]:MAXV; mintemp1.z = (tid<size)?shrtmp[2][tid]:MAXV; maxtemp1.x = (tid<size)?shrtmp[0][tid]:MINV; maxtemp1.y = (tid<size)?shrtmp[1][tid]:MINV; maxtemp1.z = (tid<size)?shrtmp[2][tid]:MINV; for (int d=1; d<32; d<<=1) { temp2 = __shfl_up(mintemp1.x,d); mintemp1.x=(mintemp1.x>temp2)?temp2:mintemp1.x; temp2 = __shfl_up(mintemp1.y,d); mintemp1.y=(mintemp1.y>temp2)?temp2:mintemp1.y; temp2 = __shfl_up(mintemp1.z,d); mintemp1.z=(mintemp1.z>temp2)?temp2:mintemp1.z; temp2 = __shfl_up(maxtemp1.x,d); maxtemp1.x=(maxtemp1.x<temp2)?temp2:maxtemp1.x; temp2 = __shfl_up(maxtemp1.y,d); maxtemp1.y=(maxtemp1.y<temp2)?temp2:maxtemp1.y; temp2 = __shfl_up(maxtemp1.z,d); maxtemp1.z=(maxtemp1.z<temp2)?temp2:maxtemp1.z; } if (tid%32 == 31) { mintemp[tid/32] = mintemp1; maxtemp[tid/32] = maxtemp1; } __syncthreads(); if (threadIdx.x < 32) { mintemp1= (tid < blockDim.x/32)?mintemp[threadIdx.x]:mindef; maxtemp1= (tid < blockDim.x/32)?maxtemp[threadIdx.x]:maxdef; for (int d=1; d<32; d<<=1) { temp2 = __shfl_up(mintemp1.x,d); mintemp1.x=(mintemp1.x>temp2)?temp2:mintemp1.x; temp2 = __shfl_up(mintemp1.y,d); mintemp1.y=(mintemp1.y>temp2)?temp2:mintemp1.y; temp2 = __shfl_up(mintemp1.z,d); mintemp1.z=(mintemp1.z>temp2)?temp2:mintemp1.z; temp2 = __shfl_up(maxtemp1.x,d); maxtemp1.x=(maxtemp1.x<temp2)?temp2:maxtemp1.x; temp2 = __shfl_up(maxtemp1.y,d); maxtemp1.y=(maxtemp1.y<temp2)?temp2:maxtemp1.y; temp2 = __shfl_up(maxtemp1.z,d); maxtemp1.z=(maxtemp1.z<temp2)?temp2:maxtemp1.z; } if (tid < blockDim.x/32) { mintemp[tid] = mintemp1; maxtemp[tid] = maxtemp1; } } __syncthreads(); if (threadIdx.x==blockDim.x-1) { d_min[blockIdx.x]=mintemp[blockDim.x/32-1]; d_max[blockIdx.x]=maxtemp[blockDim.x/32-1]; } } __global__ void minmaxmba(const Particle *d_data, float3 *d_min, float3 *d_max, int size, sblockds_t *ptoblockds) { __shared__ float3 mintemp[32]; __shared__ float3 maxtemp[32]; __shared__ float shrtmp[3][MAXTHREADS]; __shared__ unsigned int my_blockId; const int which=blockIdx.x/((size+blockDim.x-1)/blockDim.x); /* which particle should manage */ float3 mintemp1, maxtemp1; float3 mindef, maxdef; float temp2; if (threadIdx.x==0) { my_blockId = atomicAdd( &(ptoblockds[which].g_block_id), 1 ); } mindef.x=MAXV; mindef.y=MAXV; mindef.z=MAXV; maxdef.x=MINV; maxdef.y=MINV; maxdef.z=MINV; __syncthreads(); int tid = threadIdx.x; int xyz; for(int i=tid; i<3*blockDim.x; i+=blockDim.x) { xyz=i%3; shrtmp[xyz][i/3] = 
(i/3+my_blockId*blockDim.x<size)?d_data[i/3+my_blockId*blockDim.x+which*size].r[xyz]:MINV; } __syncthreads(); mintemp1.x = (tid+my_blockId*blockDim.x<size)?shrtmp[0][tid]:MAXV; mintemp1.y = (tid+my_blockId*blockDim.x<size)?shrtmp[1][tid]:MAXV; mintemp1.z = (tid+my_blockId*blockDim.x<size)?shrtmp[2][tid]:MAXV; maxtemp1.x = (tid+my_blockId*blockDim.x<size)?shrtmp[0][tid]:MINV; maxtemp1.y = (tid+my_blockId*blockDim.x<size)?shrtmp[1][tid]:MINV; maxtemp1.z = (tid+my_blockId*blockDim.x<size)?shrtmp[2][tid]:MINV; for (int d=1; d<32; d<<=1) { temp2 = __shfl_up(mintemp1.x,d); mintemp1.x=(mintemp1.x>temp2)?temp2:mintemp1.x; temp2 = __shfl_up(mintemp1.y,d); mintemp1.y=(mintemp1.y>temp2)?temp2:mintemp1.y; temp2 = __shfl_up(mintemp1.z,d); mintemp1.z=(mintemp1.z>temp2)?temp2:mintemp1.z; temp2 = __shfl_up(maxtemp1.x,d); maxtemp1.x=(maxtemp1.x<temp2)?temp2:maxtemp1.x; temp2 = __shfl_up(maxtemp1.y,d); maxtemp1.y=(maxtemp1.y<temp2)?temp2:maxtemp1.y; temp2 = __shfl_up(maxtemp1.z,d); maxtemp1.z=(maxtemp1.z<temp2)?temp2:maxtemp1.z; } if (tid%32 == 31) { mintemp[tid/32] = mintemp1; maxtemp[tid/32] = maxtemp1; } __syncthreads(); if (threadIdx.x < 32) { mintemp1= (tid < blockDim.x/32)?mintemp[threadIdx.x]:mindef; maxtemp1= (tid < blockDim.x/32)?maxtemp[threadIdx.x]:maxdef; for (int d=1; d<32; d<<=1) { temp2 = __shfl_up(mintemp1.x,d); mintemp1.x=(mintemp1.x>temp2)?temp2:mintemp1.x; temp2 = __shfl_up(mintemp1.y,d); mintemp1.y=(mintemp1.y>temp2)?temp2:mintemp1.y; temp2 = __shfl_up(mintemp1.z,d); mintemp1.z=(mintemp1.z>temp2)?temp2:mintemp1.z; temp2 = __shfl_up(maxtemp1.x,d); maxtemp1.x=(maxtemp1.x<temp2)?temp2:maxtemp1.x; temp2 = __shfl_up(maxtemp1.y,d); maxtemp1.y=(maxtemp1.y<temp2)?temp2:maxtemp1.y; temp2 = __shfl_up(maxtemp1.z,d); maxtemp1.z=(maxtemp1.z<temp2)?temp2:maxtemp1.z; } if (tid < blockDim.x/32) { mintemp[tid] = mintemp1; maxtemp[tid] = maxtemp1; } } __syncthreads(); mintemp1=mintemp[blockDim.x/32-1]; maxtemp1=maxtemp[blockDim.x/32-1]; if (threadIdx.x==(blockDim.x-1)) { do {} while( atomicAdd(&(ptoblockds[which].g_blockcnt),0) < my_blockId ); mintemp1.x=(ptoblockds[which].minval.x<mintemp1.x)?ptoblockds[which].minval.x:mintemp1.x; maxtemp1.x=(ptoblockds[which].maxval.x>maxtemp1.x)?ptoblockds[which].maxval.x:maxtemp1.x; mintemp1.y=(ptoblockds[which].minval.y<mintemp1.y)?ptoblockds[which].minval.y:mintemp1.y; maxtemp1.y=(ptoblockds[which].maxval.y>maxtemp1.y)?ptoblockds[which].maxval.y:maxtemp1.y; mintemp1.z=(ptoblockds[which].minval.z<mintemp1.z)?ptoblockds[which].minval.z:mintemp1.z; maxtemp1.z=(ptoblockds[which].maxval.z>maxtemp1.z)?ptoblockds[which].maxval.z:maxtemp1.z; if(my_blockId==(((size+blockDim.x-1)/blockDim.x))-1) { /* it is the last block; reset for next iteration */ ptoblockds[which].minval=mindef; ptoblockds[which].maxval=maxdef; ptoblockds[which].g_blockcnt=0; ptoblockds[which].g_block_id=0; d_min[which]=mintemp1; d_max[which]=maxtemp1; } else { ptoblockds[which].minval=mintemp1; ptoblockds[which].maxval=maxtemp1; atomicAdd(&(ptoblockds[which].g_blockcnt),1); } } } void minmax(const Particle * const rbc, int size, int n, float3 *minrbc, float3 *maxrbc) { const int size32 = ((size + 31) / 32) * 32; if (size32 < MAXTHREADS) KL(minmaxob, (n, size32), (rbc, minrbc, maxrbc, size)); else { static int nctc = -1; static sblockds_t *ptoblockds = NULL; if( n > nctc) { sblockds_t * h_ptoblockds = new sblockds_t[n]; for(int i=0; i < n; i++) { h_ptoblockds[i].g_block_id=0; h_ptoblockds[i].g_blockcnt=0; h_ptoblockds[i].minval.x=MAXV; h_ptoblockds[i].maxval.x=MINV; h_ptoblockds[i].minval.y=MAXV; 
h_ptoblockds[i].maxval.y=MINV; h_ptoblockds[i].minval.z=MAXV; h_ptoblockds[i].maxval.z=MINV; } if (ptoblockds != NULL) CC(d::Free(ptoblockds)); CC(d::Malloc((void **)&ptoblockds,sizeof(sblockds_t) * n)); cH2D(ptoblockds, h_ptoblockds, n); delete [] h_ptoblockds; } int nblocks= n * ((size + MAXTHREADS - 1) / MAXTHREADS); KL(minmaxmba, (nblocks, MAXTHREADS), (rbc, minrbc, maxrbc, size, ptoblockds)); } }
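The reductions in minmaxob/minmaxmba above push partial results up the warp with __shfl_up so that the last lane ends up holding the warp-wide value; a minimal sketch of that pattern for a single float, written with the CUDA 9+ __shfl_up_sync variant rather than the older unsynchronized __shfl_up used in the original:

// Sketch: warp-level inclusive prefix minimum, same shape as the loops above.
// After the loop, lane i holds the minimum of lanes 0..i, so lane 31 holds the
// minimum over the whole warp (the kernels store it via "if (tid % 32 == 31)").
__device__ float warp_prefix_min(float v) {
    for (int d = 1; d < 32; d <<= 1) {
        float other = __shfl_up_sync(0xffffffffu, v, d);
        v = (v > other) ? other : v;   // keep the smaller of the two
    }
    return v;
}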
dcd8f9e05c065d3ac9fdbd9788296ee03edeacf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <cstdio> #include <random> using namespace std; #define MAX_IT 1 #define N (1024 * 1024) #define N_CLUSTERS 6 #define BLOCKDIM 1024 #define BLOCKDIM2 ((N + BLOCKDIM - 1) / BLOCKDIM) #define CUDA_CALL(F, ...)\ if((F(__VA_ARGS__)) != hipSuccess){\ hipError_t e = hipGetLastError();\ printf("CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e));\ return(EXIT_FAILURE);\ } #define CURAND_CALL(F, ...)\ if((F(__VA_ARGS__)) != HIPRAND_STATUS_SUCCESS){\ hipError_t e = hipGetLastError();\ if(e != hipSuccess){\ printf("CuRAND failure %s:%d: '%s'\n",__FILE__,__LINE__, hipGetErrorString(e));\ }\ return(EXIT_FAILURE);\ } #define PRINT_1D_I(A, S)\ printf("[");\ for(int i = 0; i < S; i++){\ printf("%d, ", A[i]);\ }\ printf("]\n"); #define PRINT_1D_F(A, S)\ printf("[");\ for(int i = 0; i < S; i++){\ printf("%f, ", A[i]);\ }\ printf("]\n"); #define FILL_1D(A, S, V)\ for(int i = 0; i < S; i++){\ A[i] = V;\ } #define PRINT_FLAT2D(A, WIDTH, HEIGHT)\ printf("[\n");\ for(int i = 0; i < WIDTH; i++){\ printf("[");\ for(int j = 0; j < HEIGHT; j++){\ printf("%f, ", A[i + j * WIDTH]);\ }\ printf("]\n");\ }\ printf("]\n"); struct Cluster{ float sum; int count; __device__ inline void operator += (Cluster& a){ this->count += a.count; this->sum += a.sum; } }; texture<float, 1, hipReadModeElementType> tex; __device__ Cluster clusters_d[N_CLUSTERS * ((N + BLOCKDIM - 1) / BLOCKDIM)]; __device__ float euclidianDist(const float a, const float b){ return fabsf(a - b); } __global__ void relabel_k(const float* clusters, int* labels){ extern __shared__ Cluster _clusters[]; int tid = threadIdx.x; int pos = threadIdx.x + blockIdx.x * blockDim.x; #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c++){ Cluster cluster; cluster.sum = 0.0f; cluster.count = 0; _clusters[N_CLUSTERS * tid + c] = cluster; } __syncthreads(); if(pos < N){ float minDist = 1.0f; int clusterIndex = 0; float val = tex1Dfetch(tex, pos); #pragma unroll for(int c = 0; c < N_CLUSTERS; c++){ float dist = euclidianDist(val, clusters[c]); if(dist <= minDist){ clusterIndex = c; minDist = dist; } } labels[pos] = clusterIndex; _clusters[N_CLUSTERS * tid + clusterIndex].sum = val; _clusters[N_CLUSTERS * tid + clusterIndex].count = 1; } __syncthreads(); #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c += 2){ #pragma unroll for(unsigned int stride = BLOCKDIM >> 1; stride > 0; stride >>= 1){ if(tid < stride){ _clusters[N_CLUSTERS * tid + c] += _clusters[N_CLUSTERS * (tid + stride) + c]; } else if(c + 1 < N_CLUSTERS){ _clusters[N_CLUSTERS * (BLOCKDIM - tid - 1) + c + 1] += _clusters[N_CLUSTERS * (BLOCKDIM - tid + stride - 1) + c + 1]; } __syncthreads(); } } if(tid == 0){ #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c++){ clusters_d[N_CLUSTERS * blockIdx.x + c] = _clusters[c]; } } } //__global__ void calculateClusters_k(const int* labels, const int clusterIndex){ // extern __shared__ Cluster _clusters[]; // int pos = threadIdx.x + blockIdx.x * blockDim.x; // int tid = threadIdx.x; // _clusters[tid] = Cluster(); // _clusters[tid].sum = 0.0f; // _clusters[tid].count = 0; // if(pos < N && labels[pos] == clusterIndex){ // _clusters[tid].sum = tex1Dfetch(tex, pos); // _clusters[tid].count = 1; // } // __syncthreads(); // for(unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2){ // if(threadIdx.x < stride){ // _clusters[tid].sum += _clusters[tid + stride].sum; // _clusters[tid].count += _clusters[tid + stride].count; // } 
// __syncthreads(); // } // // if(tid == 0){ // clusters_d[blockIdx.x].sum = _clusters[0].sum; // clusters_d[blockIdx.x].count = _clusters[0].count; //// printf("BlockIDX = %d, Sum = %f, Count = %d", blockIdx.x, _clusters[0].sum, _clusters[0].count); // } //} __global__ void findCenters_k(float* newCenters){ extern __shared__ Cluster _clusters[]; int pos = threadIdx.x + blockIdx.x * BLOCKDIM2; int tid = threadIdx.x; int bid = blockIdx.x; #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c++){ _clusters[N_CLUSTERS * tid + c] = clusters_d[tid + bid * N_CLUSTERS * BLOCKDIM2]; } __syncthreads(); #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c += 2){ #pragma unroll for(unsigned int stride = BLOCKDIM2 >> 1; stride > 0; stride >>= 1){ if(tid < stride){ _clusters[N_CLUSTERS * tid + c] += _clusters[N_CLUSTERS * (tid + stride) + c]; } else if(c + 1 < N_CLUSTERS){ _clusters[N_CLUSTERS * (BLOCKDIM2 - tid - 1) + c + 1] += _clusters[N_CLUSTERS * (BLOCKDIM2 - tid - 1 + stride) + c + 1]; } __syncthreads(); } } if(tid == 0){ #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c++){ newCenters[c] = _clusters[c].count > 0 ? fdividef(_clusters[c].sum, _clusters[c].count) : 0.0f; } } } int main(){ // show memory usage of GPU size_t free_byte ; size_t total_byte ; CUDA_CALL(hipMemGetInfo, &free_byte, &total_byte); double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n", used_db / 1024 / 1024, free_db / 1024 / 1024, total_db / 1024 / 1024); random_device rd; mt19937 gen(rd()); uniform_real_distribution<> dis(0.0, 1.0); const int blockSize = (N + BLOCKDIM - 1) / BLOCKDIM; float* src = new float[N]; int* labels = new int[N]; float* centers = new float[N_CLUSTERS]; float* src_d, *centers_d; // Cluster* clusters_d; int* labels_d; CUDA_CALL(hipMalloc, (void**)&src_d, sizeof(float) * N); CUDA_CALL(hipMalloc, (void**)&labels_d, sizeof(int) * N); CUDA_CALL(hipMalloc, (void**)&centers_d, sizeof(float) * N_CLUSTERS); // CUDA_CALL(hipMalloc, (void**)&clusters_d, sizeof(struct Cluster) * (blockSize)); FILL_1D(src, N, dis(gen)); FILL_1D(centers, N_CLUSTERS, dis(gen)); CUDA_CALL(hipMemcpy, src_d, src, sizeof(float) * N, hipMemcpyHostToDevice); CUDA_CALL(hipBindTexture, NULL, tex, src_d, sizeof(float) * N); CUDA_CALL(hipMemcpy, centers_d, centers, sizeof(float) * N_CLUSTERS, hipMemcpyHostToDevice); hipEvent_t start, stop; CUDA_CALL(hipEventCreate, &start); CUDA_CALL(hipEventCreate, &stop); hipEventRecord(start); for(int it = 0; it < MAX_IT; it++){ hipLaunchKernelGGL(( relabel_k), dim3(blockSize), dim3(BLOCKDIM), N_CLUSTERS * sizeof(struct Cluster) * BLOCKDIM, 0, centers_d, labels_d); hipLaunchKernelGGL(( findCenters_k), dim3(1), dim3(BLOCKDIM2), N_CLUSTERS * sizeof(struct Cluster) * BLOCKDIM2, 0, centers_d); // for(int c = 0; c < N_CLUSTERS; c++){ // calculateClusters_k<<<BLOCKDIM2, BLOCKDIM, sizeof(struct Cluster) * BLOCKDIM>>>(labels_d, 0); // findCenters_k<<<1, BLOCKDIM2, sizeof(struct Cluster) * BLOCKDIM2>>>(centers_d, c); // } } hipEventRecord(stop); hipEventSynchronize(stop); hipDeviceSynchronize(); float msecs = 0.0f; hipEventElapsedTime(&msecs, start, stop); CUDA_CALL(hipMemcpy, labels, labels_d, sizeof(int) * N, hipMemcpyDeviceToHost); CUDA_CALL(hipMemcpy, centers, centers_d, sizeof(float) * N_CLUSTERS, hipMemcpyDeviceToHost); printf("Blocks = %d\n", blockSize); // PRINT_1D_I(labels, N); PRINT_1D_F(centers, N_CLUSTERS); int* freq = new int[N_CLUSTERS]; memset(freq, 0, 
sizeof(int) * N_CLUSTERS); for(int i = 0; i < N; i++){ freq[labels[i]]++; } int total = 0; for(int i = 0; i < N_CLUSTERS; i++) total += freq[i]; assert(total == N); printf("Time = %f\n", msecs); CUDA_CALL(hipEventDestroy, start); CUDA_CALL(hipEventDestroy, stop); CUDA_CALL(hipUnbindTexture, tex); CUDA_CALL(hipFree, src_d); // CUDA_CALL(hipFree, clusters_d); CUDA_CALL(hipFree, centers_d); CUDA_CALL(hipFree, labels_d); delete[] src; delete[] labels; delete[] centers; return 0; }
dcd8f9e05c065d3ac9fdbd9788296ee03edeacf9.cu
#include <assert.h> #include <cstdio> #include <random> using namespace std; #define MAX_IT 1 #define N (1024 * 1024) #define N_CLUSTERS 6 #define BLOCKDIM 1024 #define BLOCKDIM2 ((N + BLOCKDIM - 1) / BLOCKDIM) #define CUDA_CALL(F, ...)\ if((F(__VA_ARGS__)) != cudaSuccess){\ cudaError_t e = cudaGetLastError();\ printf("CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e));\ return(EXIT_FAILURE);\ } #define CURAND_CALL(F, ...)\ if((F(__VA_ARGS__)) != CURAND_STATUS_SUCCESS){\ cudaError_t e = cudaGetLastError();\ if(e != cudaSuccess){\ printf("CuRAND failure %s:%d: '%s'\n",__FILE__,__LINE__, cudaGetErrorString(e));\ }\ return(EXIT_FAILURE);\ } #define PRINT_1D_I(A, S)\ printf("[");\ for(int i = 0; i < S; i++){\ printf("%d, ", A[i]);\ }\ printf("]\n"); #define PRINT_1D_F(A, S)\ printf("[");\ for(int i = 0; i < S; i++){\ printf("%f, ", A[i]);\ }\ printf("]\n"); #define FILL_1D(A, S, V)\ for(int i = 0; i < S; i++){\ A[i] = V;\ } #define PRINT_FLAT2D(A, WIDTH, HEIGHT)\ printf("[\n");\ for(int i = 0; i < WIDTH; i++){\ printf("[");\ for(int j = 0; j < HEIGHT; j++){\ printf("%f, ", A[i + j * WIDTH]);\ }\ printf("]\n");\ }\ printf("]\n"); struct Cluster{ float sum; int count; __device__ inline void operator += (Cluster& a){ this->count += a.count; this->sum += a.sum; } }; texture<float, 1, cudaReadModeElementType> tex; __device__ Cluster clusters_d[N_CLUSTERS * ((N + BLOCKDIM - 1) / BLOCKDIM)]; __device__ float euclidianDist(const float a, const float b){ return fabsf(a - b); } __global__ void relabel_k(const float* clusters, int* labels){ extern __shared__ Cluster _clusters[]; int tid = threadIdx.x; int pos = threadIdx.x + blockIdx.x * blockDim.x; #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c++){ Cluster cluster; cluster.sum = 0.0f; cluster.count = 0; _clusters[N_CLUSTERS * tid + c] = cluster; } __syncthreads(); if(pos < N){ float minDist = 1.0f; int clusterIndex = 0; float val = tex1Dfetch(tex, pos); #pragma unroll for(int c = 0; c < N_CLUSTERS; c++){ float dist = euclidianDist(val, clusters[c]); if(dist <= minDist){ clusterIndex = c; minDist = dist; } } labels[pos] = clusterIndex; _clusters[N_CLUSTERS * tid + clusterIndex].sum = val; _clusters[N_CLUSTERS * tid + clusterIndex].count = 1; } __syncthreads(); #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c += 2){ #pragma unroll for(unsigned int stride = BLOCKDIM >> 1; stride > 0; stride >>= 1){ if(tid < stride){ _clusters[N_CLUSTERS * tid + c] += _clusters[N_CLUSTERS * (tid + stride) + c]; } else if(c + 1 < N_CLUSTERS){ _clusters[N_CLUSTERS * (BLOCKDIM - tid - 1) + c + 1] += _clusters[N_CLUSTERS * (BLOCKDIM - tid + stride - 1) + c + 1]; } __syncthreads(); } } if(tid == 0){ #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c++){ clusters_d[N_CLUSTERS * blockIdx.x + c] = _clusters[c]; } } } //__global__ void calculateClusters_k(const int* labels, const int clusterIndex){ // extern __shared__ Cluster _clusters[]; // int pos = threadIdx.x + blockIdx.x * blockDim.x; // int tid = threadIdx.x; // _clusters[tid] = Cluster(); // _clusters[tid].sum = 0.0f; // _clusters[tid].count = 0; // if(pos < N && labels[pos] == clusterIndex){ // _clusters[tid].sum = tex1Dfetch(tex, pos); // _clusters[tid].count = 1; // } // __syncthreads(); // for(unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2){ // if(threadIdx.x < stride){ // _clusters[tid].sum += _clusters[tid + stride].sum; // _clusters[tid].count += _clusters[tid + stride].count; // } // __syncthreads(); // } // // if(tid == 0){ // clusters_d[blockIdx.x].sum = 
_clusters[0].sum; // clusters_d[blockIdx.x].count = _clusters[0].count; //// printf("BlockIDX = %d, Sum = %f, Count = %d", blockIdx.x, _clusters[0].sum, _clusters[0].count); // } //} __global__ void findCenters_k(float* newCenters){ extern __shared__ Cluster _clusters[]; int pos = threadIdx.x + blockIdx.x * BLOCKDIM2; int tid = threadIdx.x; int bid = blockIdx.x; #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c++){ _clusters[N_CLUSTERS * tid + c] = clusters_d[tid + bid * N_CLUSTERS * BLOCKDIM2]; } __syncthreads(); #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c += 2){ #pragma unroll for(unsigned int stride = BLOCKDIM2 >> 1; stride > 0; stride >>= 1){ if(tid < stride){ _clusters[N_CLUSTERS * tid + c] += _clusters[N_CLUSTERS * (tid + stride) + c]; } else if(c + 1 < N_CLUSTERS){ _clusters[N_CLUSTERS * (BLOCKDIM2 - tid - 1) + c + 1] += _clusters[N_CLUSTERS * (BLOCKDIM2 - tid - 1 + stride) + c + 1]; } __syncthreads(); } } if(tid == 0){ #pragma unroll for(unsigned int c = 0; c < N_CLUSTERS; c++){ newCenters[c] = _clusters[c].count > 0 ? fdividef(_clusters[c].sum, _clusters[c].count) : 0.0f; } } } int main(){ // show memory usage of GPU size_t free_byte ; size_t total_byte ; CUDA_CALL(cudaMemGetInfo, &free_byte, &total_byte); double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n", used_db / 1024 / 1024, free_db / 1024 / 1024, total_db / 1024 / 1024); random_device rd; mt19937 gen(rd()); uniform_real_distribution<> dis(0.0, 1.0); const int blockSize = (N + BLOCKDIM - 1) / BLOCKDIM; float* src = new float[N]; int* labels = new int[N]; float* centers = new float[N_CLUSTERS]; float* src_d, *centers_d; // Cluster* clusters_d; int* labels_d; CUDA_CALL(cudaMalloc, (void**)&src_d, sizeof(float) * N); CUDA_CALL(cudaMalloc, (void**)&labels_d, sizeof(int) * N); CUDA_CALL(cudaMalloc, (void**)&centers_d, sizeof(float) * N_CLUSTERS); // CUDA_CALL(cudaMalloc, (void**)&clusters_d, sizeof(struct Cluster) * (blockSize)); FILL_1D(src, N, dis(gen)); FILL_1D(centers, N_CLUSTERS, dis(gen)); CUDA_CALL(cudaMemcpy, src_d, src, sizeof(float) * N, cudaMemcpyHostToDevice); CUDA_CALL(cudaBindTexture, NULL, tex, src_d, sizeof(float) * N); CUDA_CALL(cudaMemcpy, centers_d, centers, sizeof(float) * N_CLUSTERS, cudaMemcpyHostToDevice); cudaEvent_t start, stop; CUDA_CALL(cudaEventCreate, &start); CUDA_CALL(cudaEventCreate, &stop); cudaEventRecord(start); for(int it = 0; it < MAX_IT; it++){ relabel_k<<<blockSize, BLOCKDIM, N_CLUSTERS * sizeof(struct Cluster) * BLOCKDIM>>>(centers_d, labels_d); findCenters_k<<<1, BLOCKDIM2, N_CLUSTERS * sizeof(struct Cluster) * BLOCKDIM2>>>(centers_d); // for(int c = 0; c < N_CLUSTERS; c++){ // calculateClusters_k<<<BLOCKDIM2, BLOCKDIM, sizeof(struct Cluster) * BLOCKDIM>>>(labels_d, 0); // findCenters_k<<<1, BLOCKDIM2, sizeof(struct Cluster) * BLOCKDIM2>>>(centers_d, c); // } } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaDeviceSynchronize(); float msecs = 0.0f; cudaEventElapsedTime(&msecs, start, stop); CUDA_CALL(cudaMemcpy, labels, labels_d, sizeof(int) * N, cudaMemcpyDeviceToHost); CUDA_CALL(cudaMemcpy, centers, centers_d, sizeof(float) * N_CLUSTERS, cudaMemcpyDeviceToHost); printf("Blocks = %d\n", blockSize); // PRINT_1D_I(labels, N); PRINT_1D_F(centers, N_CLUSTERS); int* freq = new int[N_CLUSTERS]; memset(freq, 0, sizeof(int) * N_CLUSTERS); for(int i = 0; i < N; i++){ freq[labels[i]]++; } int total = 0; for(int i = 0; i < N_CLUSTERS; i++) total 
+= freq[i]; assert(total == N); printf("Time = %f\n", msecs); CUDA_CALL(cudaEventDestroy, start); CUDA_CALL(cudaEventDestroy, stop); CUDA_CALL(cudaUnbindTexture, tex); CUDA_CALL(cudaFree, src_d); // CUDA_CALL(cudaFree, clusters_d); CUDA_CALL(cudaFree, centers_d); CUDA_CALL(cudaFree, labels_d); delete[] src; delete[] labels; delete[] centers; return 0; }
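// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file above): a minimal single-threaded
// CPU reference for the same 1-D k-means iteration that relabel_k/findCenters_k
// implement, handy for validating the GPU result. It assumes the same data layout
// (N floats, N_CLUSTERS centers); the function and variable names here are
// illustrative, not taken from the original headers.
#include <cmath>
#include <vector>

static void kmeans_1d_reference(const std::vector<float>& src,
                                std::vector<float>& centers,
                                std::vector<int>& labels,
                                int max_it) {
  const int n_clusters = static_cast<int>(centers.size());
  labels.assign(src.size(), 0);
  for (int it = 0; it < max_it; ++it) {
    // Relabel step: assign each point to its nearest center (the job of relabel_k).
    for (size_t i = 0; i < src.size(); ++i) {
      int best = 0;
      float best_d = std::fabs(src[i] - centers[0]);
      for (int c = 1; c < n_clusters; ++c) {
        const float d = std::fabs(src[i] - centers[c]);
        if (d < best_d) { best_d = d; best = c; }
      }
      labels[i] = best;
    }
    // Update step: per-cluster sum and count, the quantities findCenters_k reduces
    // in shared memory before dividing (fdividef(sum, count)).
    std::vector<double> sum(n_clusters, 0.0);
    std::vector<int> count(n_clusters, 0);
    for (size_t i = 0; i < src.size(); ++i) {
      sum[labels[i]] += src[i];
      ++count[labels[i]];
    }
    for (int c = 0; c < n_clusters; ++c)
      centers[c] = count[c] > 0 ? static_cast<float>(sum[c] / count[c]) : 0.0f;
  }
}
// ---------------------------------------------------------------------------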
291998c9e9f58f6f8581a15804c020e52cb0c1d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Shaoqing Ren // ------------------------------------------------------------------ #include "gpu_nms.h" #include <vector> #include <iostream> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; CUDA_CHECK(hipGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to hipSetDevice must come before any calls to Get, which // may perform initialization using the GPU. 
CUDA_CHECK(hipSetDevice(device_id)); } void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(hipFree(boxes_dev)); CUDA_CHECK(hipFree(mask_dev)); }
291998c9e9f58f6f8581a15804c020e52cb0c1d1.cu
// ------------------------------------------------------------------ // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Shaoqing Ren // ------------------------------------------------------------------ #include "gpu_nms.h" #include <vector> #include <iostream> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; CUDA_CHECK(cudaGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to cudaSetDevice must come before any calls to Get, which // may perform initialization using the GPU. 
CUDA_CHECK(cudaSetDevice(device_id)); } void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(cudaFree(boxes_dev)); CUDA_CHECK(cudaFree(mask_dev)); }
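// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original files above): a minimal host-side
// caller for _nms() as declared in gpu_nms.h. The 5-float-per-box layout
// [x1, y1, x2, y2, score] and the descending-score ordering are assumptions
// inferred from how nms_kernel indexes the box array; the kernel itself never
// sorts, so the caller is expected to pass boxes already ordered by score.
#include <cstdio>
#include <vector>
#include "gpu_nms.h"

int main() {
  // Three boxes, already sorted by score descending.
  std::vector<float> boxes = {
      10.f,  10.f,  50.f,  50.f, 0.9f,
      12.f,  12.f,  52.f,  52.f, 0.8f,   // IoU ~0.83 with the first box
     100.f, 100.f, 140.f, 140.f, 0.7f
  };
  const int boxes_num = 3;
  const int boxes_dim = 5;
  std::vector<int> keep(boxes_num);
  int num_out = 0;
  _nms(keep.data(), &num_out, boxes.data(), boxes_num, boxes_dim,
       /*nms_overlap_thresh=*/0.5f, /*device_id=*/0);
  // With a 0.5 IoU threshold the second box is suppressed by the first,
  // so keep[0..num_out) should hold indices 0 and 2.
  for (int i = 0; i < num_out; ++i) printf("kept box %d\n", keep[i]);
  return 0;
}
// ---------------------------------------------------------------------------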
57738e8fe7b4fd23d81a14216c589fdedf01fb9a.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2015-2023 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <algorithm> #include <cmath> #include <cstdint> // std::int32_t #include <memory> #include <vector> #include "../common/common.h" #include "../common/linalg_op.h" #include "../common/numeric.h" // Reduce #include "../common/optional_weight.h" // OptionalWeights #include "../common/pseudo_huber.h" #include "../common/stats.h" #include "../common/threading_utils.h" #include "../common/transform.h" #include "./regression_loss.h" #include "adaptive.h" #include "init_estimation.h" // FitIntercept #include "xgboost/base.h" #include "xgboost/context.h" // Context #include "xgboost/data.h" // MetaInfo #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/linalg.h" #include "xgboost/logging.h" #include "xgboost/objective.h" // ObjFunction #include "xgboost/parameter.h" #include "xgboost/span.h" #include "xgboost/tree_model.h" // RegTree #if defined(XGBOOST_USE_CUDA) #include "../common/device_helpers.cuh" #include "../common/linalg_op.cuh" #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { namespace { void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) { CheckInitInputs(info); CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels."; } } // anonymous namespace #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public FitIntercept { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return Loss::Info(); } bst_target_t Targets(MetaInfo const& info) const override { // Multi-target regression. return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = ::max(static_cast<size_t>(1), (on_device ? 
ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = ::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = ::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions 
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated class PseudoHuberRegression : public FitIntercept { PesudoHuberParam param_; public: void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } bst_target_t Targets(MetaInfo const& info) const override { return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto slope = param_.huber_slope; CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0."; auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); const float z = predt(i) - y; const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope)); float grad = z / scale_sqrt; auto scale = common::Sqr(slope) + common::Sqr(z); float hess = common::Sqr(slope) / (scale * scale_sqrt); auto w = weight[sample_id]; gpair(i) = {grad * w, hess * w}; }); } const char* DefaultEvalMetric() const override { return "mphe"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:pseudohubererror"); out["pseudo_huber_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); if (config.find("pseudo_huber_param") == config.cend()) { // The parameter is added in 1.6. return; } FromJson(in["pseudo_huber_param"], &param_); } }; XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror") .describe("Regression Pseudo Huber error.") .set_body([]() { return new PseudoHuberRegression(); }); // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public FitIntercept { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public FitIntercept { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << 
"labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(ctx_); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += ::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = ::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = ::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public FitIntercept { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, 
common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public FitIntercept { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ ::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); class MeanAbsoluteError : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } bst_target_t Targets(MetaInfo const& info) const override { return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sign = [](auto x) { return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0)); }; auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); auto grad = sign(predt(i) - y) * weight[sample_id]; auto hess = weight[sample_id]; gpair(i) = GradientPair{grad, hess}; }); } void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override { CheckInitInputs(info); base_margin->Reshape(this->Targets(info)); double w{0.0}; if (info.weights_.Empty()) { w = static_cast<double>(info.num_row_); } else { w = common::Reduce(ctx_, info.weights_); } if (info.num_row_ == 0) { auto out = base_margin->HostView(); out(0) = 0; } else { linalg::Vector<float> temp; common::Median(ctx_, info.labels, info.weights_, &temp); common::Mean(ctx_, temp, base_margin); } CHECK_EQ(base_margin->Size(), 1); auto out = base_margin->HostView(); // weighted avg std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v * w; }); collective::GlobalSum(info, &out.Values()); collective::GlobalSum(info, &w, 1); if (common::CloseTo(w, 0.0)) { // Mostly for handling empty dataset test. LOG(WARNING) << "Sum of weights is close to 0.0, skipping base score estimation."; out(0) = ObjFunction::DefaultBaseScore(); return; } std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v / w; }); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, float learning_rate, HostDeviceVector<float> const& prediction, std::int32_t group_idx, RegTree* p_tree) const override { ::xgboost::obj::UpdateTreeLeaf(ctx_, position, group_idx, info, learning_rate, prediction, 0.5, p_tree); } const char* DefaultEvalMetric() const override { return "mae"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:absoluteerror"); } void LoadConfig(Json const& in) override { CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"}); } }; XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror") .describe("Mean absoluate error.") .set_body([]() { return new MeanAbsoluteError(); }); } // namespace obj } // namespace xgboost
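// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file above): the per-element
// pseudo-Huber gradient/hessian that PseudoHuberRegression::GetGradient computes
// inside its ElementWiseKernel, pulled out as a standalone host function so the
// math is easy to check. `slope` corresponds to the huber_slope parameter; the
// function name itself is illustrative.
#include <cmath>
#include <cstdio>

struct GradHess { float grad, hess; };

static GradHess pseudo_huber_grad(float pred, float label, float slope) {
  const float z = pred - label;
  const float slope2 = slope * slope;                       // common::Sqr(slope)
  const float scale = slope2 + z * z;                       // Sqr(slope) + Sqr(z)
  const float scale_sqrt = std::sqrt(1.0f + z * z / slope2);
  GradHess gh;
  gh.grad = z / scale_sqrt;   // derivative of slope^2 * (sqrt(1 + z^2/slope^2) - 1)
  gh.hess = slope2 / (scale * scale_sqrt);
  return gh;
}

int main() {
  // pred = 2, label = 0, slope = 1: z = 2, scale_sqrt = sqrt(5),
  // grad = 2/sqrt(5) ~ 0.894, hess = 1/(5*sqrt(5)) ~ 0.089.
  const GradHess gh = pseudo_huber_grad(2.0f, 0.0f, 1.0f);
  printf("grad=%f hess=%f\n", gh.grad, gh.hess);
  return 0;
}
// ---------------------------------------------------------------------------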
57738e8fe7b4fd23d81a14216c589fdedf01fb9a.cu
/** * Copyright 2015-2023 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <algorithm> #include <cmath> #include <cstdint> // std::int32_t #include <memory> #include <vector> #include "../common/common.h" #include "../common/linalg_op.h" #include "../common/numeric.h" // Reduce #include "../common/optional_weight.h" // OptionalWeights #include "../common/pseudo_huber.h" #include "../common/stats.h" #include "../common/threading_utils.h" #include "../common/transform.h" #include "./regression_loss.h" #include "adaptive.h" #include "init_estimation.h" // FitIntercept #include "xgboost/base.h" #include "xgboost/context.h" // Context #include "xgboost/data.h" // MetaInfo #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/linalg.h" #include "xgboost/logging.h" #include "xgboost/objective.h" // ObjFunction #include "xgboost/parameter.h" #include "xgboost/span.h" #include "xgboost/tree_model.h" // RegTree #if defined(XGBOOST_USE_CUDA) #include "../common/device_helpers.cuh" #include "../common/linalg_op.cuh" #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { namespace { void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) { CheckInitInputs(info); CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels."; } } // anonymous namespace #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public FitIntercept { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return Loss::Info(); } bst_target_t Targets(MetaInfo const& info) const override { // Multi-target regression. return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? 
ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = std::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions 
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated class PseudoHuberRegression : public FitIntercept { PesudoHuberParam param_; public: void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } bst_target_t Targets(MetaInfo const& info) const override { return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto slope = param_.huber_slope; CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0."; auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); const float z = predt(i) - y; const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope)); float grad = z / scale_sqrt; auto scale = common::Sqr(slope) + common::Sqr(z); float hess = common::Sqr(slope) / (scale * scale_sqrt); auto w = weight[sample_id]; gpair(i) = {grad * w, hess * w}; }); } const char* DefaultEvalMetric() const override { return "mphe"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:pseudohubererror"); out["pseudo_huber_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); if (config.find("pseudo_huber_param") == config.cend()) { // The parameter is added in 1.6. return; } FromJson(in["pseudo_huber_param"], &param_); } }; XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror") .describe("Regression Pseudo Huber error.") .set_body([]() { return new PseudoHuberRegression(); }); // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public FitIntercept { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public FitIntercept { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << 
"labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(ctx_); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += std::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = std::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = std::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public FitIntercept { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] 
XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public FitIntercept { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); class MeanAbsoluteError : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } bst_target_t Targets(MetaInfo const& info) const override { return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sign = [](auto x) { return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0)); }; auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); auto grad = sign(predt(i) - y) * weight[sample_id]; auto hess = weight[sample_id]; gpair(i) = GradientPair{grad, hess}; }); } void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override { CheckInitInputs(info); base_margin->Reshape(this->Targets(info)); double w{0.0}; if (info.weights_.Empty()) { w = static_cast<double>(info.num_row_); } else { w = common::Reduce(ctx_, info.weights_); } if (info.num_row_ == 0) { auto out = base_margin->HostView(); out(0) = 0; } else { linalg::Vector<float> temp; common::Median(ctx_, info.labels, info.weights_, &temp); common::Mean(ctx_, temp, base_margin); } CHECK_EQ(base_margin->Size(), 1); auto out = base_margin->HostView(); // weighted avg std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v * w; }); collective::GlobalSum(info, &out.Values()); collective::GlobalSum(info, &w, 1); if (common::CloseTo(w, 0.0)) { // Mostly for handling empty dataset test. LOG(WARNING) << "Sum of weights is close to 0.0, skipping base score estimation."; out(0) = ObjFunction::DefaultBaseScore(); return; } std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v / w; }); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, float learning_rate, HostDeviceVector<float> const& prediction, std::int32_t group_idx, RegTree* p_tree) const override { ::xgboost::obj::UpdateTreeLeaf(ctx_, position, group_idx, info, learning_rate, prediction, 0.5, p_tree); } const char* DefaultEvalMetric() const override { return "mae"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:absoluteerror"); } void LoadConfig(Json const& in) override { CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"}); } }; XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror") .describe("Mean absoluate error.") .set_body([]() { return new MeanAbsoluteError(); }); } // namespace obj } // namespace xgboost
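// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the XGBoost sources above): the
// Tweedie objective uses a log link, so for a raw margin p, a label y and a
// variance power rho in [1, 2), the per-row gradient and hessian reduce to the
// closed forms used in GetGradient(). The constants below are made up purely
// for demonstration.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

int main() {
  const float rho = 1.5f;  // tweedie_variance_power
  const float y   = 2.0f;  // observed label
  const float p   = 0.3f;  // raw prediction on the log scale
  const float grad = -y * std::exp((1.0f - rho) * p) + std::exp((2.0f - rho) * p);
  const float hess = -y * (1.0f - rho) * std::exp((1.0f - rho) * p) +
                     (2.0f - rho) * std::exp((2.0f - rho) * p);
  std::printf("grad = %f, hess = %f\n", grad, hess);
  return 0;
}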
7612375152da6d1aee5ffdc6a1d6165d111823c1.hip
// !!! This is a file automatically generated by hipify!!! #include <wb.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define BLOCK_SIZE 512 #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) __global__ void scan(float *input, float *output, float *aux, int len) { //@@ Modify the body of this kernel to generate the scanned blocks //@@ Make sure to use the workefficient version of the parallel scan //@@ Also make sure to store the block sum to the aux array __shared__ float XY[2 * BLOCK_SIZE]; //loading data from global memory to shared memory int i = 2 * blockIdx.x*blockDim.x + threadIdx.x; if (i<len) XY[threadIdx.x] = input[i]; else XY[threadIdx.x] = 0; if (i + blockDim.x<len) XY[threadIdx.x + blockDim.x] = input[i + blockDim.x]; else XY[threadIdx.x + blockDim.x] = 0; __syncthreads(); //Reduction Phase for (unsigned int stride = 1; stride <= BLOCK_SIZE; stride *= 2) { int index = (threadIdx.x + 1)*stride * 2 - 1; if (index < 2 * BLOCK_SIZE) XY[index] += XY[index - stride]; __syncthreads(); } //Post Reduction Phase for (int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) { int index = (threadIdx.x + 1)*stride * 2 - 1; if (index + stride < 2 * BLOCK_SIZE) XY[index + stride] += XY[index]; __syncthreads(); } //writing data to output if (i < len) output[i] = XY[threadIdx.x]; if (i + blockDim.x < len) output[i + blockDim.x] = XY[threadIdx.x + blockDim.x]; if(aux!=NULL && threadIdx.x==0) aux[blockIdx.x] = XY[2 * blockDim.x - 1]; } __global__ void addScannedBlockSums(float *input, float *aux, int len) { //@@ Modify the body of this kernel to add scanned block sums to //@@ all values of the scanned blocks int i = 2 * blockIdx.x * blockDim.x + threadIdx.x; if (blockIdx.x > 0) { if (i < len) input[i] += aux[blockIdx.x - 1]; if (i + blockDim.x < len) input[i + blockDim.x] += aux[blockIdx.x - 1]; } } int main(int argc, char **argv) { wbArg_t args; float *hostInput; // The input 1D list float *hostOutput; // The output 1D list float *deviceInput; float *deviceOutput; float *deviceAuxArray, *deviceAuxScannedArray; int numElements; // number of elements in the input/output list args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &numElements); hostOutput = (float *)malloc(numElements * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The number of input elements in the input is ", numElements); wbTime_start(GPU, "Allocating device memory."); //@@ Allocate device memory //you can assume that aux array size would not need to be more than BLOCK_SIZE*2 (i.e., 1024) wbCheck(hipMalloc((void **)&deviceInput, numElements * sizeof(float))); wbCheck(hipMalloc((void **)&deviceAuxScannedArray, 2 * BLOCK_SIZE * sizeof(float))); wbCheck(hipMalloc((void **)&deviceAuxArray, 2 * BLOCK_SIZE * sizeof(float))); wbCheck(hipMalloc((void **)&deviceOutput, numElements * sizeof(float))); wbTime_stop(GPU, "Allocating device memory."); wbTime_start(GPU, "Clearing output device memory."); //@@ zero out the deviceOutput using hipMemset() by uncommenting the below line wbCheck(hipMemset(deviceOutput, 0, numElements * sizeof(float))); wbTime_stop(GPU, "Clearing output device memory."); wbTime_start(GPU, "Copying input host 
memory to device."); //@@ Copy input host memory to device wbCheck(hipMemcpy(deviceInput, hostInput, numElements * sizeof(float), hipMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input host memory to device."); //@@ Initialize the grid and block dimensions here wbTime_start(Compute, "Performing CUDA computation"); //@@ Modify this to complete the functionality of the scan //@@ on the deivce //@@ You need to launch scan kernel twice: 1) for generating scanned blocks //@@ (hint: pass deviceAuxArray to the aux parameter) //@@ and 2) for generating scanned aux array that has the scanned block sums. //@@ (hint: pass NULL to the aux parameter) //@@ Then you should call addScannedBlockSums kernel. int numBlocks = ceil(numElements / float(BLOCK_SIZE * 2)); dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); scan<<<dimGrid, dimBlock >> >(deviceInput, deviceOutput, deviceAuxArray, numElements); hipDeviceSynchronize(); dim3 singleGrid(1, 1, 1); scan<<<singleGrid, dimBlock >> >(deviceAuxArray, deviceAuxScannedArray, NULL, BLOCK_SIZE * 2); hipDeviceSynchronize(); addScannedBlockSums <<<dimGrid, dimBlock >> >(deviceOutput, deviceAuxScannedArray, numElements); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output device memory to host"); //@@ Copy results from device to host wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements * sizeof(float), hipMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output device memory to host"); wbTime_start(GPU, "Freeing device memory"); //@@ Deallocate device memory hipFree(deviceInput); hipFree(deviceAuxScannedArray); hipFree(deviceAuxArray); hipFree(deviceOutput); wbTime_stop(GPU, "Freeing device memory"); wbSolution(args, hostOutput, numElements); free(hostInput); free(hostOutput); return 0; }
7612375152da6d1aee5ffdc6a1d6165d111823c1.cu
#include <wb.h> #include <cuda.h> #include <cuda_runtime_api.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #define BLOCK_SIZE 512 #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) __global__ void scan(float *input, float *output, float *aux, int len) { //@@ Modify the body of this kernel to generate the scanned blocks //@@ Make sure to use the workefficient version of the parallel scan //@@ Also make sure to store the block sum to the aux array __shared__ float XY[2 * BLOCK_SIZE]; //loading data from global memory to shared memory int i = 2 * blockIdx.x*blockDim.x + threadIdx.x; if (i<len) XY[threadIdx.x] = input[i]; else XY[threadIdx.x] = 0; if (i + blockDim.x<len) XY[threadIdx.x + blockDim.x] = input[i + blockDim.x]; else XY[threadIdx.x + blockDim.x] = 0; __syncthreads(); //Reduction Phase for (unsigned int stride = 1; stride <= BLOCK_SIZE; stride *= 2) { int index = (threadIdx.x + 1)*stride * 2 - 1; if (index < 2 * BLOCK_SIZE) XY[index] += XY[index - stride]; __syncthreads(); } //Post Reduction Phase for (int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) { int index = (threadIdx.x + 1)*stride * 2 - 1; if (index + stride < 2 * BLOCK_SIZE) XY[index + stride] += XY[index]; __syncthreads(); } //writing data to output if (i < len) output[i] = XY[threadIdx.x]; if (i + blockDim.x < len) output[i + blockDim.x] = XY[threadIdx.x + blockDim.x]; if(aux!=NULL && threadIdx.x==0) aux[blockIdx.x] = XY[2 * blockDim.x - 1]; } __global__ void addScannedBlockSums(float *input, float *aux, int len) { //@@ Modify the body of this kernel to add scanned block sums to //@@ all values of the scanned blocks int i = 2 * blockIdx.x * blockDim.x + threadIdx.x; if (blockIdx.x > 0) { if (i < len) input[i] += aux[blockIdx.x - 1]; if (i + blockDim.x < len) input[i + blockDim.x] += aux[blockIdx.x - 1]; } } int main(int argc, char **argv) { wbArg_t args; float *hostInput; // The input 1D list float *hostOutput; // The output 1D list float *deviceInput; float *deviceOutput; float *deviceAuxArray, *deviceAuxScannedArray; int numElements; // number of elements in the input/output list args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &numElements); hostOutput = (float *)malloc(numElements * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The number of input elements in the input is ", numElements); wbTime_start(GPU, "Allocating device memory."); //@@ Allocate device memory //you can assume that aux array size would not need to be more than BLOCK_SIZE*2 (i.e., 1024) wbCheck(cudaMalloc((void **)&deviceInput, numElements * sizeof(float))); wbCheck(cudaMalloc((void **)&deviceAuxScannedArray, 2 * BLOCK_SIZE * sizeof(float))); wbCheck(cudaMalloc((void **)&deviceAuxArray, 2 * BLOCK_SIZE * sizeof(float))); wbCheck(cudaMalloc((void **)&deviceOutput, numElements * sizeof(float))); wbTime_stop(GPU, "Allocating device memory."); wbTime_start(GPU, "Clearing output device memory."); //@@ zero out the deviceOutput using cudaMemset() by uncommenting the below line wbCheck(cudaMemset(deviceOutput, 0, numElements * sizeof(float))); wbTime_stop(GPU, "Clearing output device memory."); wbTime_start(GPU, "Copying input host memory to device."); //@@ Copy input host memory to device 
wbCheck(cudaMemcpy(deviceInput, hostInput, numElements * sizeof(float), cudaMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input host memory to device."); //@@ Initialize the grid and block dimensions here wbTime_start(Compute, "Performing CUDA computation"); //@@ Modify this to complete the functionality of the scan //@@ on the deivce //@@ You need to launch scan kernel twice: 1) for generating scanned blocks //@@ (hint: pass deviceAuxArray to the aux parameter) //@@ and 2) for generating scanned aux array that has the scanned block sums. //@@ (hint: pass NULL to the aux parameter) //@@ Then you should call addScannedBlockSums kernel. int numBlocks = ceil(numElements / float(BLOCK_SIZE * 2)); dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); scan<<<dimGrid, dimBlock >> >(deviceInput, deviceOutput, deviceAuxArray, numElements); cudaDeviceSynchronize(); dim3 singleGrid(1, 1, 1); scan<<<singleGrid, dimBlock >> >(deviceAuxArray, deviceAuxScannedArray, NULL, BLOCK_SIZE * 2); cudaDeviceSynchronize(); addScannedBlockSums <<<dimGrid, dimBlock >> >(deviceOutput, deviceAuxScannedArray, numElements); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output device memory to host"); //@@ Copy results from device to host wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements * sizeof(float), cudaMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output device memory to host"); wbTime_start(GPU, "Freeing device memory"); //@@ Deallocate device memory cudaFree(deviceInput); cudaFree(deviceAuxScannedArray); cudaFree(deviceAuxArray); cudaFree(deviceOutput); wbTime_stop(GPU, "Freeing device memory"); wbSolution(args, hostOutput, numElements); free(hostInput); free(hostOutput); return 0; }
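// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the assignment files above): a CPU
// reference for the inclusive prefix sum that the two-level scan kernels
// compute, handy for spot-checking the GPU output on small inputs. The sample
// values are arbitrary.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

std::vector<float> inclusiveScan(const std::vector<float> &in) {
  std::vector<float> out(in.size());
  float running = 0.0f;
  for (size_t i = 0; i < in.size(); ++i) {
    running += in[i];
    out[i] = running;  // out[i] = in[0] + ... + in[i]
  }
  return out;
}

int main() {
  const std::vector<float> in = {3, 1, 7, 0, 4, 1, 6, 3};
  for (float v : inclusiveScan(in)) std::printf("%.0f ", v);  // 3 4 11 11 15 16 22 25
  std::printf("\n");
  return 0;
}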
f2392dc5a7a3f4ef8e757124a740b0c375411f1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); hipError_t permGen(); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } system("pause"); return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel << <1, size >> >(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
f2392dc5a7a3f4ef8e757124a740b0c375411f1e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); cudaError_t permGen(); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } system("pause"); return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel << <1, size >> >(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
173eba129517e8bf068e74ce9b04f790a13aa369.hip
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>

namespace at::native {
namespace {

constexpr char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";

void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
    opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
    gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
      return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
    });
  });
#endif
} // chebyshev_polynomial_t_kernel_cuda

} // namespace (anonymous)

REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);

} // namespace at::native
173eba129517e8bf068e74ce9b04f790a13aa369.cu
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>

namespace at::native {
namespace {

constexpr char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";

void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
    opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
    gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
      return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
    });
  });
#endif
} // chebyshev_polynomial_t_kernel_cuda

} // namespace (anonymous)

REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);

} // namespace at::native
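// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the file above): for integer order
// n, chebyshev_polynomial_t_forward evaluates the Chebyshev polynomial of the
// first kind, which satisfies T_0(x) = 1, T_1(x) = x and
// T_k(x) = 2x*T_{k-1}(x) - T_{k-2}(x). The real implementation in
// ATen/native/cuda/Math.cuh also covers non-integer orders and edge cases;
// this host-only recurrence is just for intuition.
// ---------------------------------------------------------------------------
#include <cstdio>

double chebyshev_t(double x, int n) {
  if (n == 0) return 1.0;
  if (n == 1) return x;
  double t_prev = 1.0, t_curr = x;
  for (int k = 2; k <= n; ++k) {
    const double t_next = 2.0 * x * t_curr - t_prev;
    t_prev = t_curr;
    t_curr = t_next;
  }
  return t_curr;
}

int main() {
  std::printf("T_3(0.5) = %f\n", chebyshev_t(0.5, 3));  // 4*0.125 - 1.5 = -1.0
  return 0;
}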
dc75c6df82c3fcd77f9cf6900755994eb2794fd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Created by Guoliang Kang. #include <vector> #include <algorithm> #include "caffe/layers/shakeout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ShakeoutForward_kernel(const int n, const Dtype scale, const Dtype shakeout_const, const Dtype* mask, const Dtype* in, Dtype* out1, Dtype* out2) { CUDA_KERNEL_LOOP(index, n) { out1[index] = in[index] * mask[index] * scale; out2[index] = in[index] * shakeout_const * (mask[index] * scale - 1); } } template <typename Dtype> __global__ void generateMask_kernel(const int count, Dtype* mask, const Dtype threshold){ CUDA_KERNEL_LOOP(index, count) { mask[index] = mask[index]>=threshold?1:0; } } template <typename Dtype> void ShakeoutLayer<Dtype>::generateMask_gpu(){ Dtype* mask = rand_vec_->mutable_gpu_data(); caffe_gpu_rng_uniform<Dtype>(rand_vec_->count(), 0, 1,mask); int count = rand_vec_->count(); hipLaunchKernelGGL(( generateMask_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, mask, this->threshold_); } template <typename Dtype> __global__ void getSignOfWeight_kernel(const int count, const Dtype* data, Dtype* sign_data, const Dtype scale){ CUDA_KERNEL_LOOP(index, count) { sign_data[index] = data[index]>0?1:(data[index]<0?-1:0); } } template <typename Dtype> void ShakeoutLayer<Dtype>::getSignOfWeight_gpu(){ const Dtype* weights = this->blobs_[0]->gpu_data(); Dtype* sign_weights = this->sign_blobs_[0]->mutable_gpu_data(); const int count = this->blobs_[0]->count(); hipLaunchKernelGGL(( getSignOfWeight_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, weights, sign_weights, this->tanh_smooth_scale_); if(this->sign_blobs_.size()>1){ caffe_gpu_set(this->sign_blobs_[1]->count(), Dtype(0), this->sign_blobs_[1]->mutable_gpu_data()); } } template <typename Dtype> __global__ void TruncWeight_kernel(Dtype* data, const int count, const float threshold){ CUDA_KERNEL_LOOP(index, count) { if(data[index] < threshold && data[index] > -threshold){ data[index] = 0; } } } template <typename Dtype> void ShakeoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count_bottom = bottom[0]->count(); const int count_top = top[0]->count(); if (this->phase_ == TRAIN) { // Create random numbers generateMask_gpu(); // the signs of biases are all set zeros getSignOfWeight_gpu(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); vector<Blob<Dtype>*> bottom_split1(bottom.size()), bottom_split2(bottom.size()); for(int i = 0; i < bottom.size(); i++){ bottom_split1[i] = new Blob<Dtype>(bottom[i]->shape()); bottom_split2[i] = new Blob<Dtype>(bottom[i]->shape()); } Dtype* bottom_split1_data = bottom_split1[0]->mutable_gpu_data(); Dtype* bottom_split2_data = bottom_split2[0]->mutable_gpu_data(); vector<Blob<Dtype>*> top_split1(top.size()), top_split2(top.size()); for(int i = 0; i < top.size(); i++){ top_split1[i] = new Blob<Dtype>(top[i]->shape()); top_split2[i] = new Blob<Dtype>(top[i]->shape()); } Dtype* top_split1_data = top_split1[0]->mutable_gpu_data(); Dtype* top_split2_data = top_split2[0]->mutable_gpu_data(); const Dtype* mask = rand_vec_->mutable_gpu_data(); hipLaunchKernelGGL(( ShakeoutForward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count_bottom)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_bottom, this->scale_, this->shakeout_const_, mask, 
bottom_data, bottom_split1_data, bottom_split2_data); // reset the operate_layer_'s weights operate_layer_->blobs() = this->blobs_; // forward operate_layer_->Forward(bottom_split1, top_split1); // reset and forward again operate_layer_->blobs() = this->sign_blobs_; operate_layer_->Forward(bottom_split2, top_split2); // sum // gpu version caffe_gpu_add<Dtype>(count_top, top_split1_data, top_split2_data, top_data); for(int i = 0; i < bottom.size(); i++){ delete bottom_split1[i]; delete bottom_split2[i]; } for(int i = 0; i < top.size(); i++){ delete top_split1[i]; delete top_split2[i]; } } else { operate_layer_->blobs() = this->blobs_; operate_layer_->Forward(bottom, top); } } template <typename Dtype> __global__ void ShakeoutBackward_neuron_kernel(const int n, const Dtype scale, const Dtype shakeout_const, const Dtype* in_diff1, const Dtype* in_diff2, const Dtype* mask, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff1[index] * mask[index] * scale + in_diff2[index] * shakeout_const * (mask[index] * scale - 1); } } template <typename Dtype> __global__ void ShakeoutBackward_weights_kernel(const int n, Dtype* sign_weights_diff, const Dtype* sign_weights_data, Dtype* weights_diff, const Dtype scale, const Dtype* weights_data){ CUDA_KERNEL_LOOP(index, n) { Dtype sign_diff_approx = scale * (1 - tanh(scale * weights_data[index]) * tanh(scale * weights_data[index])); sign_weights_diff[index] *= sign_diff_approx; weights_diff[index] += sign_weights_diff[index]; } } template <typename Dtype> void ShakeoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* mask = rand_vec_->gpu_data(); const int count_bottom = bottom[0]->count(); if (this->phase_ == TRAIN) { vector<Blob<Dtype>*> bottom_split1(bottom.size()), bottom_split2(bottom.size()); for(int i = 0; i < bottom.size(); i++){ bottom_split1[i] = new Blob<Dtype>(bottom[i]->shape()); bottom_split2[i] = new Blob<Dtype>(bottom[i]->shape()); } Dtype* bottom_split1_data = bottom_split1[0]->mutable_gpu_data(); const Dtype* bottom_split1_diff = bottom_split1[0]->gpu_diff(); Dtype* bottom_split2_data = bottom_split2[0]->mutable_gpu_data(); const Dtype* bottom_split2_diff = bottom_split2[0]->gpu_diff(); hipLaunchKernelGGL(( ShakeoutForward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count_bottom)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_bottom, this->scale_, this->shakeout_const_, mask, bottom_data, bottom_split1_data, bottom_split2_data); // compute gradients with respect to neurons // reset the operate_layer_'s weights operate_layer_->blobs() = this->blobs_; // backward operate_layer_->Backward(top, propagate_down, bottom_split1); // reset and backward again operate_layer_->blobs() = this->sign_blobs_; for(int i=0;i<this->sign_blobs_.size();i++){ caffe_gpu_set(this->sign_blobs_[i]->count(), Dtype(0), this->sign_blobs_[i]->mutable_gpu_diff()); } operate_layer_->Backward(top, propagate_down, bottom_split2); // gpu version hipLaunchKernelGGL(( ShakeoutBackward_neuron_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count_bottom)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_bottom, this->scale_, this->shakeout_const_, bottom_split1_diff, bottom_split2_diff, mask, bottom_diff); // compute gradients with respect to weights and biases (for biases, already updated in the previous process Dtype* weights_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* 
weights_data = this->blobs_[0]->gpu_data(); Dtype* sign_weights_diff = this->sign_blobs_[0]->mutable_gpu_diff(); const Dtype* sign_weights_data = this->sign_blobs_[0]->gpu_data(); hipLaunchKernelGGL(( ShakeoutBackward_weights_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), sign_weights_diff, sign_weights_data, weights_diff, this->tanh_smooth_scale_, weights_data); for(int i = 0; i < bottom.size(); i++){ delete bottom_split1[i]; delete bottom_split2[i]; } } else { //caffe_copy(top[0]->count(), top_diff, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(ShakeoutLayer); } // namespace caffe
dc75c6df82c3fcd77f9cf6900755994eb2794fd9.cu
// Created by Guoliang Kang. #include <vector> #include <algorithm> #include "caffe/layers/shakeout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ShakeoutForward_kernel(const int n, const Dtype scale, const Dtype shakeout_const, const Dtype* mask, const Dtype* in, Dtype* out1, Dtype* out2) { CUDA_KERNEL_LOOP(index, n) { out1[index] = in[index] * mask[index] * scale; out2[index] = in[index] * shakeout_const * (mask[index] * scale - 1); } } template <typename Dtype> __global__ void generateMask_kernel(const int count, Dtype* mask, const Dtype threshold){ CUDA_KERNEL_LOOP(index, count) { mask[index] = mask[index]>=threshold?1:0; } } template <typename Dtype> void ShakeoutLayer<Dtype>::generateMask_gpu(){ Dtype* mask = rand_vec_->mutable_gpu_data(); caffe_gpu_rng_uniform<Dtype>(rand_vec_->count(), 0, 1,mask); int count = rand_vec_->count(); generateMask_kernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, mask, this->threshold_); } template <typename Dtype> __global__ void getSignOfWeight_kernel(const int count, const Dtype* data, Dtype* sign_data, const Dtype scale){ CUDA_KERNEL_LOOP(index, count) { sign_data[index] = data[index]>0?1:(data[index]<0?-1:0); } } template <typename Dtype> void ShakeoutLayer<Dtype>::getSignOfWeight_gpu(){ const Dtype* weights = this->blobs_[0]->gpu_data(); Dtype* sign_weights = this->sign_blobs_[0]->mutable_gpu_data(); const int count = this->blobs_[0]->count(); getSignOfWeight_kernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, weights, sign_weights, this->tanh_smooth_scale_); if(this->sign_blobs_.size()>1){ caffe_gpu_set(this->sign_blobs_[1]->count(), Dtype(0), this->sign_blobs_[1]->mutable_gpu_data()); } } template <typename Dtype> __global__ void TruncWeight_kernel(Dtype* data, const int count, const float threshold){ CUDA_KERNEL_LOOP(index, count) { if(data[index] < threshold && data[index] > -threshold){ data[index] = 0; } } } template <typename Dtype> void ShakeoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count_bottom = bottom[0]->count(); const int count_top = top[0]->count(); if (this->phase_ == TRAIN) { // Create random numbers generateMask_gpu(); // the signs of biases are all set zeros getSignOfWeight_gpu(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); vector<Blob<Dtype>*> bottom_split1(bottom.size()), bottom_split2(bottom.size()); for(int i = 0; i < bottom.size(); i++){ bottom_split1[i] = new Blob<Dtype>(bottom[i]->shape()); bottom_split2[i] = new Blob<Dtype>(bottom[i]->shape()); } Dtype* bottom_split1_data = bottom_split1[0]->mutable_gpu_data(); Dtype* bottom_split2_data = bottom_split2[0]->mutable_gpu_data(); vector<Blob<Dtype>*> top_split1(top.size()), top_split2(top.size()); for(int i = 0; i < top.size(); i++){ top_split1[i] = new Blob<Dtype>(top[i]->shape()); top_split2[i] = new Blob<Dtype>(top[i]->shape()); } Dtype* top_split1_data = top_split1[0]->mutable_gpu_data(); Dtype* top_split2_data = top_split2[0]->mutable_gpu_data(); const Dtype* mask = rand_vec_->mutable_gpu_data(); ShakeoutForward_kernel<Dtype><<<CAFFE_GET_BLOCKS(count_bottom), CAFFE_CUDA_NUM_THREADS>>>(count_bottom, this->scale_, this->shakeout_const_, mask, bottom_data, bottom_split1_data, bottom_split2_data); // reset the operate_layer_'s weights operate_layer_->blobs() = this->blobs_; // forward operate_layer_->Forward(bottom_split1, top_split1); // reset and 
forward again operate_layer_->blobs() = this->sign_blobs_; operate_layer_->Forward(bottom_split2, top_split2); // sum // gpu version caffe_gpu_add<Dtype>(count_top, top_split1_data, top_split2_data, top_data); for(int i = 0; i < bottom.size(); i++){ delete bottom_split1[i]; delete bottom_split2[i]; } for(int i = 0; i < top.size(); i++){ delete top_split1[i]; delete top_split2[i]; } } else { operate_layer_->blobs() = this->blobs_; operate_layer_->Forward(bottom, top); } } template <typename Dtype> __global__ void ShakeoutBackward_neuron_kernel(const int n, const Dtype scale, const Dtype shakeout_const, const Dtype* in_diff1, const Dtype* in_diff2, const Dtype* mask, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff1[index] * mask[index] * scale + in_diff2[index] * shakeout_const * (mask[index] * scale - 1); } } template <typename Dtype> __global__ void ShakeoutBackward_weights_kernel(const int n, Dtype* sign_weights_diff, const Dtype* sign_weights_data, Dtype* weights_diff, const Dtype scale, const Dtype* weights_data){ CUDA_KERNEL_LOOP(index, n) { Dtype sign_diff_approx = scale * (1 - tanh(scale * weights_data[index]) * tanh(scale * weights_data[index])); sign_weights_diff[index] *= sign_diff_approx; weights_diff[index] += sign_weights_diff[index]; } } template <typename Dtype> void ShakeoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* mask = rand_vec_->gpu_data(); const int count_bottom = bottom[0]->count(); if (this->phase_ == TRAIN) { vector<Blob<Dtype>*> bottom_split1(bottom.size()), bottom_split2(bottom.size()); for(int i = 0; i < bottom.size(); i++){ bottom_split1[i] = new Blob<Dtype>(bottom[i]->shape()); bottom_split2[i] = new Blob<Dtype>(bottom[i]->shape()); } Dtype* bottom_split1_data = bottom_split1[0]->mutable_gpu_data(); const Dtype* bottom_split1_diff = bottom_split1[0]->gpu_diff(); Dtype* bottom_split2_data = bottom_split2[0]->mutable_gpu_data(); const Dtype* bottom_split2_diff = bottom_split2[0]->gpu_diff(); ShakeoutForward_kernel<Dtype><<<CAFFE_GET_BLOCKS(count_bottom), CAFFE_CUDA_NUM_THREADS>>>(count_bottom, this->scale_, this->shakeout_const_, mask, bottom_data, bottom_split1_data, bottom_split2_data); // compute gradients with respect to neurons // reset the operate_layer_'s weights operate_layer_->blobs() = this->blobs_; // backward operate_layer_->Backward(top, propagate_down, bottom_split1); // reset and backward again operate_layer_->blobs() = this->sign_blobs_; for(int i=0;i<this->sign_blobs_.size();i++){ caffe_gpu_set(this->sign_blobs_[i]->count(), Dtype(0), this->sign_blobs_[i]->mutable_gpu_diff()); } operate_layer_->Backward(top, propagate_down, bottom_split2); // gpu version ShakeoutBackward_neuron_kernel<Dtype><<<CAFFE_GET_BLOCKS(count_bottom), CAFFE_CUDA_NUM_THREADS>>>(count_bottom, this->scale_, this->shakeout_const_, bottom_split1_diff, bottom_split2_diff, mask, bottom_diff); // compute gradients with respect to weights and biases (for biases, already updated in the previous process Dtype* weights_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* weights_data = this->blobs_[0]->gpu_data(); Dtype* sign_weights_diff = this->sign_blobs_[0]->mutable_gpu_diff(); const Dtype* sign_weights_data = this->sign_blobs_[0]->gpu_data(); ShakeoutBackward_weights_kernel<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), 
CAFFE_CUDA_NUM_THREADS>>>(this->blobs_[0]->count(), sign_weights_diff, sign_weights_data, weights_diff, this->tanh_smooth_scale_, weights_data); for(int i = 0; i < bottom.size(); i++){ delete bottom_split1[i]; delete bottom_split2[i]; } } else { //caffe_copy(top[0]->count(), top_diff, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(ShakeoutLayer); } // namespace caffe
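// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the Caffe layer above): the
// per-element transform applied by ShakeoutForward_kernel, written for the
// CPU. Each input x is split into a masked, rescaled branch (fed through the
// weights) and a complementary branch (fed through the sign of the weights).
// The mask/scale/shakeout_const values below are arbitrary examples.
// ---------------------------------------------------------------------------
#include <cstdio>

void shakeout_forward(float x, float mask, float scale, float shakeout_const,
                      float *out1, float *out2) {
  *out1 = x * mask * scale;                         // branch through W
  *out2 = x * shakeout_const * (mask * scale - 1);  // branch through sign(W)
}

int main() {
  float o1 = 0.0f, o2 = 0.0f;
  shakeout_forward(2.0f, 1.0f, 2.0f, 0.5f, &o1, &o2);
  std::printf("out1 = %f, out2 = %f\n", o1, o2);  // out1 = 4, out2 = 1
  return 0;
}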
7600e36e9906868f82c495e0e4cb50048e825d4e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "exec_time.h" #include <math.h> #include "matrix_lib.h" int print_matrix(struct matrix *matrix) { unsigned long int i; unsigned long int N; unsigned long int nxt_newLine; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; /* Initialize new line controol */ nxt_newLine = matrix->width - 1; /* Print matrix elements */ for (i = 0; i < N; i++) { printf("%5.1f ", matrix->h_rows[i]); if (i == nxt_newLine) { printf("\n"); nxt_newLine += matrix->width; } } return 1; } int load_matrix(struct matrix *matrix, FILE *filename) { unsigned long int i = 0; unsigned long int N = 0; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; float *nxt = matrix->h_rows; for ( i = 0; i < N; i += 1) { fread(nxt, sizeof(float), 1, filename); matrix->h_rows[i] = *nxt; } return 1; } int main_func(int argc, char **argv) { float scalar = atof(argv[1]); Matrix matA, matB; matA.height = atoi(argv[2]); matA.width = atoi(argv[3]); matB.height = atoi(argv[4]); matB.width =atoi(argv[5]); FILE *file1, *file2, *result1, *result2; file1 = fopen(argv[6],"rb"); file2 = fopen(argv[7],"rb"); result1 = fopen(argv[8],"wb"); result2 = fopen(argv[9],"wb"); if(file1 == NULL || file2 ==NULL) { fprintf(stdout, ".dat failed to open. exiting...\n"); exit(1); } hipError_t hipError_t; int i; struct timeval start, stop; // Disable buffering entirely setbuf(stdout, NULL); // Allocating arrays on host printf("Allocating matA.h_rows and matB.h_rows on host..."); gettimeofday(&start, NULL); matA.h_rows = (float*)malloc((matA.height*matA.width)*sizeof(float)); matB.h_rows = (float*)malloc((matB.height*matB.width)*sizeof(float)); // check malloc memory allocation if (matA.h_rows == NULL || matB.h_rows == NULL) { printf("Error: malloc unable to allocate memory on host."); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Allocating array on device printf("Allocating array matA.d_rows and matB.d_rows on device..."); gettimeofday(&start, NULL); hipError_t = hipMalloc(&matA.d_rows, matA.height*matA.width*sizeof(float)); // check hipMalloc memory allocation if (hipError_t != hipSuccess) { printf("hipMalloc matA.d_rows returned error %s (code %d)\n", hipGetErrorString(hipError_t), hipError_t); return 1; } hipError_t = hipMalloc(&matB.d_rows, matB.height*matB.width*sizeof(float)); // check hipMalloc memory allocation if (hipError_t != hipSuccess) { printf("hipMalloc matB.d_rows returned error %s (code %d)\n", hipGetErrorString(hipError_t), hipError_t); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Initialize host memory printf("Initializing array matA and matB on host..."); gettimeofday(&start, NULL); load_matrix(&matA, file1); load_matrix(&matB, file2); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Allocating array on device printf("Allocating array matC.d_rows on device..."); gettimeofday(&start, NULL); Matrix matC; matC.height = matA.height; matC.width = matB.width; matC.h_rows = (float*)malloc((matC.height*matC.width)*sizeof(float)); // check malloc memory allocation if (matC.h_rows == NULL) { printf("Error: malloc unable to allocate memory on 
host."); return 1; } hipError_t = hipMalloc(&matC.d_rows, matC.height*matC.width*sizeof(float)); // check hipMalloc memory allocation if (hipError_t != hipSuccess) { printf("hipMalloc matC.d_rows returned error %s (code %d)\n", hipGetErrorString(hipError_t), hipError_t); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Copy array from host to device printf("Copying arrays from host to device..."); gettimeofday(&start, NULL); hipError_t = hipMemcpy(matA.d_rows, matA.h_rows, (matA.width*matA.height)*sizeof(float), hipMemcpyHostToDevice); if (hipError_t != hipSuccess) { printf("hipMemcpy matA.d_rows -> matA.h_rows returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__); return 1; } hipError_t = hipMemcpy(matB.d_rows, matB.h_rows, (matB.width*matB.height)*sizeof(float), hipMemcpyHostToDevice); if (hipError_t != hipSuccess) { printf("hipMemcpy matB.d_rows -> matB.h_rows returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__); return 1; } hipError_t = hipMemcpy(matC.d_rows, matC.h_rows, (matC.width*matC.height)*sizeof(float), hipMemcpyHostToDevice); if (hipError_t != hipSuccess) { printf("hipMemcpy matC.d_rows -> matC.h_rows returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Run kernel on elements on the GPU printf("Running kernel on elements of matA.d_rows ..."); gettimeofday(&start, NULL); float valorAantes = matA.h_rows[0]; scalar_matrix_mult(scalar, &matA); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Copy array from device to host printf("Copying array from device matA.d_rows to host matA.h_rows..."); gettimeofday(&start, NULL); hipError_t = hipMemcpy(matA.h_rows,matA.d_rows, (matA.height*matA.width) * sizeof(float), hipMemcpyDeviceToHost); if (hipError_t != hipSuccess) { printf("hipMemcpy matA.d_rows -> matA.h_rows returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); //Check for errors printf("Checking for processing errors for multiplication of matrix a and scalar..."); gettimeofday(&start, NULL); float maxError = 0.0f; float diffError = 0.0f; for (i = 0; i < (matA.height*matA.width); i++) { maxError = (maxError > (diffError=fabs(matA.h_rows[0]-scalar*valorAantes)))? 
maxError : diffError; //printf("%d -> %f\n", i, matA.h_rows[i]); } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); printf("Max error: %f\n", maxError); //print matrixA print_matrix(&matA); if(matrix_matrix_mult(&matA, &matB, &matC) !=1) return 0; // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Copy array from device to host printf("Copying array from device matC.d_rows to host matC.h_rows...\n"); gettimeofday(&start, NULL); hipError_t = hipMemcpy(matC.h_rows,matC.d_rows, (matC.height*matC.width) * sizeof(float), hipMemcpyDeviceToHost); if (hipError_t != hipSuccess) { printf("hipMemcpy matC.d_rows -> matC.h_rows returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__); return 1; } print_matrix(&matC); // Free memory printf("Freeing memory..."); gettimeofday(&start, NULL); hipFree(matA.d_rows); hipFree(matB.d_rows); free(matA.h_rows); free(matB.h_rows); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); for(int i=0; i<matA.height*matA.width; i++){ fwrite((void*)(&matA.h_rows[i]), sizeof(matA.h_rows[i]), 1, result1); } for(int i=0; i<matC.height*matC.width; i++){ fwrite((void*)(&matC.h_rows[i]), sizeof(matC.h_rows[i]), 1, result2); } fclose(file1); fclose(file2); fclose(result1); fclose(result2); return 0; }
7600e36e9906868f82c495e0e4cb50048e825d4e.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include "exec_time.h" #include <math.h> #include "matrix_lib.h" int print_matrix(struct matrix *matrix) { unsigned long int i; unsigned long int N; unsigned long int nxt_newLine; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; /* Initialize new line controol */ nxt_newLine = matrix->width - 1; /* Print matrix elements */ for (i = 0; i < N; i++) { printf("%5.1f ", matrix->h_rows[i]); if (i == nxt_newLine) { printf("\n"); nxt_newLine += matrix->width; } } return 1; } int load_matrix(struct matrix *matrix, FILE *filename) { unsigned long int i = 0; unsigned long int N = 0; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; float *nxt = matrix->h_rows; for ( i = 0; i < N; i += 1) { fread(nxt, sizeof(float), 1, filename); matrix->h_rows[i] = *nxt; } return 1; } int main_func(int argc, char **argv) { float scalar = atof(argv[1]); Matrix matA, matB; matA.height = atoi(argv[2]); matA.width = atoi(argv[3]); matB.height = atoi(argv[4]); matB.width =atoi(argv[5]); FILE *file1, *file2, *result1, *result2; file1 = fopen(argv[6],"rb"); file2 = fopen(argv[7],"rb"); result1 = fopen(argv[8],"wb"); result2 = fopen(argv[9],"wb"); if(file1 == NULL || file2 ==NULL) { fprintf(stdout, ".dat failed to open. exiting...\n"); exit(1); } cudaError_t cudaError; int i; struct timeval start, stop; // Disable buffering entirely setbuf(stdout, NULL); // Allocating arrays on host printf("Allocating matA.h_rows and matB.h_rows on host..."); gettimeofday(&start, NULL); matA.h_rows = (float*)malloc((matA.height*matA.width)*sizeof(float)); matB.h_rows = (float*)malloc((matB.height*matB.width)*sizeof(float)); // check malloc memory allocation if (matA.h_rows == NULL || matB.h_rows == NULL) { printf("Error: malloc unable to allocate memory on host."); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Allocating array on device printf("Allocating array matA.d_rows and matB.d_rows on device..."); gettimeofday(&start, NULL); cudaError = cudaMalloc(&matA.d_rows, matA.height*matA.width*sizeof(float)); // check cudaMalloc memory allocation if (cudaError != cudaSuccess) { printf("cudaMalloc matA.d_rows returned error %s (code %d)\n", cudaGetErrorString(cudaError), cudaError); return 1; } cudaError = cudaMalloc(&matB.d_rows, matB.height*matB.width*sizeof(float)); // check cudaMalloc memory allocation if (cudaError != cudaSuccess) { printf("cudaMalloc matB.d_rows returned error %s (code %d)\n", cudaGetErrorString(cudaError), cudaError); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Initialize host memory printf("Initializing array matA and matB on host..."); gettimeofday(&start, NULL); load_matrix(&matA, file1); load_matrix(&matB, file2); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Allocating array on device printf("Allocating array matC.d_rows on device..."); gettimeofday(&start, NULL); Matrix matC; matC.height = matA.height; matC.width = matB.width; matC.h_rows = (float*)malloc((matC.height*matC.width)*sizeof(float)); // check malloc memory allocation if (matC.h_rows == NULL) { printf("Error: malloc unable to allocate memory on host."); return 1; } cudaError = cudaMalloc(&matC.d_rows, 
matC.height*matC.width*sizeof(float)); // check cudaMalloc memory allocation if (cudaError != cudaSuccess) { printf("cudaMalloc matC.d_rows returned error %s (code %d)\n", cudaGetErrorString(cudaError), cudaError); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Copy array from host to device printf("Copying arrays from host to device..."); gettimeofday(&start, NULL); cudaError = cudaMemcpy(matA.d_rows, matA.h_rows, (matA.width*matA.height)*sizeof(float), cudaMemcpyHostToDevice); if (cudaError != cudaSuccess) { printf("cudaMemcpy matA.d_rows -> matA.h_rows returned error %s (code %d), line(%d)\n", cudaGetErrorString(cudaError), cudaError, __LINE__); return 1; } cudaError = cudaMemcpy(matB.d_rows, matB.h_rows, (matB.width*matB.height)*sizeof(float), cudaMemcpyHostToDevice); if (cudaError != cudaSuccess) { printf("cudaMemcpy matB.d_rows -> matB.h_rows returned error %s (code %d), line(%d)\n", cudaGetErrorString(cudaError), cudaError, __LINE__); return 1; } cudaError = cudaMemcpy(matC.d_rows, matC.h_rows, (matC.width*matC.height)*sizeof(float), cudaMemcpyHostToDevice); if (cudaError != cudaSuccess) { printf("cudaMemcpy matC.d_rows -> matC.h_rows returned error %s (code %d), line(%d)\n", cudaGetErrorString(cudaError), cudaError, __LINE__); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Run kernel on elements on the GPU printf("Running kernel on elements of matA.d_rows ..."); gettimeofday(&start, NULL); float valorAantes = matA.h_rows[0]; scalar_matrix_mult(scalar, &matA); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Copy array from device to host printf("Copying array from device matA.d_rows to host matA.h_rows..."); gettimeofday(&start, NULL); cudaError = cudaMemcpy(matA.h_rows,matA.d_rows, (matA.height*matA.width) * sizeof(float), cudaMemcpyDeviceToHost); if (cudaError != cudaSuccess) { printf("cudaMemcpy matA.d_rows -> matA.h_rows returned error %s (code %d), line(%d)\n", cudaGetErrorString(cudaError), cudaError, __LINE__); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); //Check for errors printf("Checking for processing errors for multiplication of matrix a and scalar..."); gettimeofday(&start, NULL); float maxError = 0.0f; float diffError = 0.0f; for (i = 0; i < (matA.height*matA.width); i++) { maxError = (maxError > (diffError=fabs(matA.h_rows[0]-scalar*valorAantes)))? 
maxError : diffError; //printf("%d -> %f\n", i, matA.h_rows[i]); } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); printf("Max error: %f\n", maxError); //print matrixA print_matrix(&matA); if(matrix_matrix_mult(&matA, &matB, &matC) !=1) return 0; // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Copy array from device to host printf("Copying array from device matC.d_rows to host matC.h_rows...\n"); gettimeofday(&start, NULL); cudaError = cudaMemcpy(matC.h_rows,matC.d_rows, (matC.height*matC.width) * sizeof(float), cudaMemcpyDeviceToHost); if (cudaError != cudaSuccess) { printf("cudaMemcpy matC.d_rows -> matC.h_rows returned error %s (code %d), line(%d)\n", cudaGetErrorString(cudaError), cudaError, __LINE__); return 1; } print_matrix(&matC); // Free memory printf("Freeing memory..."); gettimeofday(&start, NULL); cudaFree(matA.d_rows); cudaFree(matB.d_rows); free(matA.h_rows); free(matB.h_rows); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); for(int i=0; i<matA.height*matA.width; i++){ fwrite((void*)(&matA.h_rows[i]), sizeof(matA.h_rows[i]), 1, result1); } for(int i=0; i<matC.height*matC.width; i++){ fwrite((void*)(&matC.h_rows[i]), sizeof(matC.h_rows[i]), 1, result2); } fclose(file1); fclose(file2); fclose(result1); fclose(result2); return 0; }
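// ---------------------------------------------------------------------------
// Editor's illustrative sketch: matrix_lib.h is not included in this dump, so
// the kernel below is only an assumption about what a minimal
// scalar_matrix_mult-style kernel over the flat d_rows layout could look like,
// not the project's actual implementation. scale_elements and its launch
// configuration are hypothetical names/values.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_elements(float *data, unsigned long n, float scalar) {
  unsigned long i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    data[i] *= scalar;  // scale one element of the flattened matrix
  }
}

int main() {
  const unsigned long n = 8;
  float host[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  float *dev = 0;
  cudaMalloc((void **)&dev, n * sizeof(float));
  cudaMemcpy(dev, host, n * sizeof(float), cudaMemcpyHostToDevice);
  scale_elements<<<(n + 255) / 256, 256>>>(dev, n, 2.5f);
  cudaMemcpy(host, dev, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(dev);
  for (unsigned long i = 0; i < n; i++) std::printf("%.1f ", host[i]);
  std::printf("\n");  // 2.5 5.0 7.5 10.0 12.5 15.0 17.5 20.0
  return 0;
}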
52065afa5f53356b0af1d3e43b2c6638b0f3d027.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <thrust/sort.h> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "nms_only_op.h" namespace caffe2 { namespace { __device__ void _copy(const int size, const float* source, float* target) { for (int i=0; i<size; i++) { *(target++) = *(source++); } } __global__ void GetValuesAndIndices(const int nthreads, const float* boxes, float* values, TIndex* indices) { CUDA_1D_KERNEL_LOOP(index, nthreads) { values[index] = boxes[index * 13 + 4]; indices[index] = static_cast<TIndex>(index); } } __global__ void ComputeOverlapping(const int nthreads, const float* input_boxes, const TIndex* indices, const int num_total, float* overlaps) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int idA = index / num_total; int idB = index % num_total; // if idA < idB, then the score should be higher if (idA < idB) { const int iidA = static_cast<int>(indices[idA]) * 13; const int iidB = static_cast<int>(indices[idB]) * 13; const float x1A = input_boxes[iidA]; const float y1A = input_boxes[iidA+1]; const float x2A = input_boxes[iidA+2]; const float y2A = input_boxes[iidA+3]; const float areaA = input_boxes[iidA+5]; const float x1B = input_boxes[iidB]; const float y1B = input_boxes[iidB+1]; const float x2B = input_boxes[iidB+2]; const float y2B = input_boxes[iidB+3]; const float areaB = input_boxes[iidB+5]; const float xx1 = (x1A > x1B) ? x1A : x1B; const float yy1 = (y1A > y1B) ? y1A : y1B; const float xx2 = (x2A < x2B) ? x2A : x2B; const float yy2 = (y2A < y2B) ? y2A : y2B; float w = xx2 - xx1 + 1.; w = (w > 0.) ? w : 0.; float h = yy2 - yy1 + 1.; h = (h > 0.) ? h : 0.; const float inter = w * h; overlaps[idA * num_total + idB] = inter / (areaA + areaB - inter); } } } __global__ void NMSOnlyForward(const int nthreads, const float* overlaps, const TIndex* indices, const int num_total, const float threshold, const int top_n, float* output_boxes, int* output_index, int* cnt) { CUDA_1D_KERNEL_LOOP(index, nthreads) { *cnt = 0; for (int i=0; i<num_total; i++) { const int id = static_cast<int>(indices[i]); // make sure we will change for every box if (output_boxes[id * 13 + 7] < 1.) 
{ for (int j=i+1; j<num_total; j++) { if (overlaps[i * num_total + j] >= threshold) { const int jd = static_cast<int>(indices[j]); output_boxes[jd * 13 + 7] = 1.; } } // should be the actual index output_index[(*cnt)] = id; (*cnt)++; } // enough boxes, still assign box if ((*cnt) == top_n) { for (int j=i+1; j<num_total; j++) { const int jd = static_cast<int>(indices[j]); output_boxes[jd * 13 + 7] = 1.; } break; } } } } __global__ void CopyBoxes(const int nthreads, const float* boxes, float* output_boxes) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int bid = i * 13; const int oid = i * 12; _copy(7, boxes + bid, output_boxes + oid); _copy(5, boxes + bid + 8, output_boxes + oid + 7); } } __global__ void NMSOnlyReduceBoxes(const int nthreads, const float* boxes, const int* index, float* output_boxes) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int id = index[i]; const int bid = id * 13; const int oid = i * 12; _copy(7, boxes + bid, output_boxes + oid); _copy(5, boxes + bid + 8, output_boxes + oid + 7); } } } // namespace template<> bool NMSOnlyOp<float, CUDAContext>::RunOnDevice() { auto& boxes = Input(0); DCHECK_EQ(boxes.dim32(1), 13); const int num_total = boxes.dim32(0); // handle the empty case if (num_total == 0) { Output(0)->Resize(0, 12); Output(0)->mutable_data<float>(); return true; } else if (num_total == 1) { auto* output_boxes = Output(0); output_boxes->Resize(1, 12); hipLaunchKernelGGL(( CopyBoxes), dim3(CAFFE_GET_BLOCKS(1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), 1, boxes.data<float>(), output_boxes->mutable_data<float>()); return true; } const int num_pair = num_total * num_total; const float* boxes_pointer = boxes.data<float>(); values.Resize(num_total); indices.Resize(num_total); float* values_pointer = values.mutable_data<float>(); TIndex* indices_pointer = indices.mutable_data<TIndex>(); hipLaunchKernelGGL(( GetValuesAndIndices), dim3(CAFFE_GET_BLOCKS(num_total)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_total, boxes_pointer, values_pointer, indices_pointer); // sort the value and get the indexes thrust::sort_by_key(thrust::hip::par.on(context_.cuda_stream()), values_pointer, values_pointer + num_total, indices_pointer, thrust::greater<float>()); // pairwise comparison overlaps.Resize(num_total, num_total); float* overlaps_pointer = overlaps.mutable_data<float>(); // initialize everything math::Set<float, CUDAContext>(num_pair, 0., overlaps_pointer, &context_); hipLaunchKernelGGL(( ComputeOverlapping), dim3(CAFFE_GET_BLOCKS(num_pair)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_pair, boxes_pointer, indices_pointer, num_total, overlaps_pointer); // then just reduce by setting up the index middle.ResizeLike(boxes); float* middle_pointer = middle.mutable_data<float>(); context_.Copy<float, CUDAContext, CUDAContext>(num_total * 13, boxes_pointer, middle_pointer); // also remember the index mindex.Resize(num_total); int* mindex_pointer = mindex.mutable_data<int>(); math::Set<int, CUDAContext>(num_total, -1, mindex_pointer, &context_); mcounter.Resize(1); int* mcounter_pointer = mcounter.mutable_data<int>(); // using one thread to go down the list hipLaunchKernelGGL(( NMSOnlyForward), dim3(CAFFE_GET_BLOCKS(1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), 1, overlaps_pointer, indices_pointer, num_total, nms_, dpi_, middle_pointer, mindex_pointer, mcounter_pointer); // get the counter value int num_reduced; context_.Copy<int, CUDAContext, CPUContext>(1, mcounter_pointer, &num_reduced); // then only copy the valid 
results auto* out_boxes = Output(0); out_boxes->Resize(num_reduced, 12); float* out_boxes_pointer = out_boxes->mutable_data<float>(); hipLaunchKernelGGL(( NMSOnlyReduceBoxes), dim3(CAFFE_GET_BLOCKS(num_reduced)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_reduced, middle_pointer, mindex_pointer, out_boxes_pointer); return true; } REGISTER_CUDA_OPERATOR(NMSOnly, NMSOnlyOp<float, CUDAContext>); } // namespace caffe2
52065afa5f53356b0af1d3e43b2c6638b0f3d027.cu
#include <cfloat> #include <thrust/sort.h> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "nms_only_op.h" namespace caffe2 { namespace { __device__ void _copy(const int size, const float* source, float* target) { for (int i=0; i<size; i++) { *(target++) = *(source++); } } __global__ void GetValuesAndIndices(const int nthreads, const float* boxes, float* values, TIndex* indices) { CUDA_1D_KERNEL_LOOP(index, nthreads) { values[index] = boxes[index * 13 + 4]; indices[index] = static_cast<TIndex>(index); } } __global__ void ComputeOverlapping(const int nthreads, const float* input_boxes, const TIndex* indices, const int num_total, float* overlaps) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int idA = index / num_total; int idB = index % num_total; // if idA < idB, then the score should be higher if (idA < idB) { const int iidA = static_cast<int>(indices[idA]) * 13; const int iidB = static_cast<int>(indices[idB]) * 13; const float x1A = input_boxes[iidA]; const float y1A = input_boxes[iidA+1]; const float x2A = input_boxes[iidA+2]; const float y2A = input_boxes[iidA+3]; const float areaA = input_boxes[iidA+5]; const float x1B = input_boxes[iidB]; const float y1B = input_boxes[iidB+1]; const float x2B = input_boxes[iidB+2]; const float y2B = input_boxes[iidB+3]; const float areaB = input_boxes[iidB+5]; const float xx1 = (x1A > x1B) ? x1A : x1B; const float yy1 = (y1A > y1B) ? y1A : y1B; const float xx2 = (x2A < x2B) ? x2A : x2B; const float yy2 = (y2A < y2B) ? y2A : y2B; float w = xx2 - xx1 + 1.; w = (w > 0.) ? w : 0.; float h = yy2 - yy1 + 1.; h = (h > 0.) ? h : 0.; const float inter = w * h; overlaps[idA * num_total + idB] = inter / (areaA + areaB - inter); } } } __global__ void NMSOnlyForward(const int nthreads, const float* overlaps, const TIndex* indices, const int num_total, const float threshold, const int top_n, float* output_boxes, int* output_index, int* cnt) { CUDA_1D_KERNEL_LOOP(index, nthreads) { *cnt = 0; for (int i=0; i<num_total; i++) { const int id = static_cast<int>(indices[i]); // make sure we will change for every box if (output_boxes[id * 13 + 7] < 1.) 
{ for (int j=i+1; j<num_total; j++) { if (overlaps[i * num_total + j] >= threshold) { const int jd = static_cast<int>(indices[j]); output_boxes[jd * 13 + 7] = 1.; } } // should be the actual index output_index[(*cnt)] = id; (*cnt)++; } // enough boxes, still assign box if ((*cnt) == top_n) { for (int j=i+1; j<num_total; j++) { const int jd = static_cast<int>(indices[j]); output_boxes[jd * 13 + 7] = 1.; } break; } } } } __global__ void CopyBoxes(const int nthreads, const float* boxes, float* output_boxes) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int bid = i * 13; const int oid = i * 12; _copy(7, boxes + bid, output_boxes + oid); _copy(5, boxes + bid + 8, output_boxes + oid + 7); } } __global__ void NMSOnlyReduceBoxes(const int nthreads, const float* boxes, const int* index, float* output_boxes) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int id = index[i]; const int bid = id * 13; const int oid = i * 12; _copy(7, boxes + bid, output_boxes + oid); _copy(5, boxes + bid + 8, output_boxes + oid + 7); } } } // namespace template<> bool NMSOnlyOp<float, CUDAContext>::RunOnDevice() { auto& boxes = Input(0); DCHECK_EQ(boxes.dim32(1), 13); const int num_total = boxes.dim32(0); // handle the empty case if (num_total == 0) { Output(0)->Resize(0, 12); Output(0)->mutable_data<float>(); return true; } else if (num_total == 1) { auto* output_boxes = Output(0); output_boxes->Resize(1, 12); CopyBoxes<<<CAFFE_GET_BLOCKS(1), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(1, boxes.data<float>(), output_boxes->mutable_data<float>()); return true; } const int num_pair = num_total * num_total; const float* boxes_pointer = boxes.data<float>(); values.Resize(num_total); indices.Resize(num_total); float* values_pointer = values.mutable_data<float>(); TIndex* indices_pointer = indices.mutable_data<TIndex>(); GetValuesAndIndices<<<CAFFE_GET_BLOCKS(num_total), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(num_total, boxes_pointer, values_pointer, indices_pointer); // sort the value and get the indexes thrust::sort_by_key(thrust::cuda::par.on(context_.cuda_stream()), values_pointer, values_pointer + num_total, indices_pointer, thrust::greater<float>()); // pairwise comparison overlaps.Resize(num_total, num_total); float* overlaps_pointer = overlaps.mutable_data<float>(); // initialize everything math::Set<float, CUDAContext>(num_pair, 0., overlaps_pointer, &context_); ComputeOverlapping<<<CAFFE_GET_BLOCKS(num_pair), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(num_pair, boxes_pointer, indices_pointer, num_total, overlaps_pointer); // then just reduce by setting up the index middle.ResizeLike(boxes); float* middle_pointer = middle.mutable_data<float>(); context_.Copy<float, CUDAContext, CUDAContext>(num_total * 13, boxes_pointer, middle_pointer); // also remember the index mindex.Resize(num_total); int* mindex_pointer = mindex.mutable_data<int>(); math::Set<int, CUDAContext>(num_total, -1, mindex_pointer, &context_); mcounter.Resize(1); int* mcounter_pointer = mcounter.mutable_data<int>(); // using one thread to go down the list NMSOnlyForward<<<CAFFE_GET_BLOCKS(1), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(1, overlaps_pointer, indices_pointer, num_total, nms_, dpi_, middle_pointer, mindex_pointer, mcounter_pointer); // get the counter value int num_reduced; context_.Copy<int, CUDAContext, CPUContext>(1, mcounter_pointer, &num_reduced); // then only copy the valid results auto* out_boxes = Output(0); out_boxes->Resize(num_reduced, 12); float* out_boxes_pointer = 
out_boxes->mutable_data<float>(); NMSOnlyReduceBoxes<<<CAFFE_GET_BLOCKS(num_reduced), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(num_reduced, middle_pointer, mindex_pointer, out_boxes_pointer); return true; } REGISTER_CUDA_OPERATOR(NMSOnly, NMSOnlyOp<float, CUDAContext>); } // namespace caffe2
dbc7ac0727eeb645220825c8dea7459bd29d7ec6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <UnitTest++.h> #include <memory> #include <vector> #include <array> #include "MonteRay_SphericalGrid.hh" #include "MonteRay_SpatialGrid.hh" #include "MonteRayVector3D.hh" #include "MonteRayConstants.hh" #include "MonteRayCopyMemory.t.hh" using namespace MonteRay; namespace MonteRay_SphericalGrid_on_GPU_tester{ SUITE( MonteRay_SphericalGrid_GPU_basic_tests ) { #ifdef __HIPCC__ using Grid_t = MonteRay_SphericalGrid; using GridBins_t = MonteRay_GridBins; using GridBins_t = Grid_t::GridBins_t; using pGridInfo_t = GridBins_t*; using pArrayOfpGridInfo_t = Grid_t::pArrayOfpGridInfo_t; typedef MonteRay::Vector3D<gpuRayFloat_t> Position_t; class gridTestData { public: enum coord {R,DIM}; gridTestData(){ std::vector<gpuRayFloat_t> vertices = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; pGridInfo[0] = new GridBins_t(); pGridInfo[0]->initialize( vertices ); } ~gridTestData(){ delete pGridInfo[0]; } pArrayOfpGridInfo_t pGridInfo; }; template<typename T> class resultClass : public CopyMemoryBase<resultClass<T>> { public: using Base = CopyMemoryBase<resultClass>; resultClass() : CopyMemoryBase<resultClass<T>>() { init(); } ~resultClass(){} std::string className(){ return std::string("resultClass");} void init() { v = T(0); } void copyToGPU(void) { //std::cout << "Debug: resultClass::copyToGPU \n"; Base::copyToGPU(); } void copyToCPU(void) { //std::cout << "Debug: resultClass::copyToCPU \n"; Base::copyToCPU(); } void copy(const resultClass* rhs) { #ifndef NDEBUG if( this->debug ) { std::cout << "Debug: 1- resultClass::copy(const resultClass* rhs) \n"; } #endif if( this->isCudaIntermediate && rhs->isCudaIntermediate ) { throw std::runtime_error("resultClass::copy -- can NOT copy CUDA intermediate to CUDA intermediate."); } if( !this->isCudaIntermediate && !rhs->isCudaIntermediate ) { throw std::runtime_error("resultClass::copy -- can NOT copy CUDA non-intermediate to CUDA non-intermediate."); } v = rhs->v; } T v; }; // kernal call CUDA_CALLABLE_KERNEL kernelSphericalGridGetNumBins(Grid_t** pGrid, resultClass<unsigned>* pResult, unsigned d) { pResult->v = (*pGrid)->getNumBins(d); } TEST( getNumBins_on_GPU ) { enum coord {R,DIM}; gridTestData data; resultClass<unsigned>* pResult = new resultClass<unsigned>(); pResult->copyToGPU(); std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); pGrid->copyToGPU(); // Grid_t** devicePtr; // data.pGridInfo[X]->copyToGPU(); // data.pGridInfo[Y]->copyToGPU(); // data.pGridInfo[Z]->copyToGPU(); // createDeviceInstance<<<1,1>>>( devicePtr, data.pGridInfo[X]->devicePtr, data.pGridInfo[Y]->devicePtr, data.pGridInfo[Z]->devicePtr ); hipStreamSynchronize(0); gpuErrchk( hipPeekAtLastError() ); //printf( "Debug: devicePtr = %d\n", devicePtr ); hipLaunchKernelGGL(( kernelSphericalGridGetNumBins), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, 0); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 10, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetNumBins), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, 1); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 0, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetNumBins), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, 2); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 0, pResult->v ); delete pResult; } // // kernal call CUDA_CALLABLE_KERNEL 
kernelSphericalGridGetIndex(Grid_t** pGrid, resultClass<unsigned>* pResult, Position_t pos) { //printf("Debug: kernelSphericalGridGetIndex -- calling pGrid->getIndex(pos)\n"); unsigned index = (*pGrid)->getIndex(pos); pResult->v = index; } TEST( getIndex ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); pGrid->copyToGPU(); resultClass<unsigned>* pResult = new resultClass<unsigned>(); pResult->copyToGPU(); Position_t pos1( -0.5, -0.5, -0.5 ); Position_t pos2( -1.5, 0.0, 0.0 ); Position_t pos3( 2.5, 0.0, 0.0 ); Position_t pos4( 0.0, -3.5, 0.0 ); Position_t pos5( 0.0, 4.5, 0.0 ); Position_t pos6( 0.0, 0.0, -5.5 ); Position_t pos7( 0.0, 0.0, 6.5 ); Position_t pos8( 5.5, 5.5, 5.5 ); Position_t pos9( 10.0, 10.0, 10.0 ); hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos1); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 0, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos2); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 1, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos3); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 2, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos4); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 3, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos5); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 4, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos6); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 5, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos7); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 6, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos8); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 9, pResult->v ); pResult->v = 0; hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos9); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( UINT_MAX, pResult->v ); pResult->v = 0; // delete pResult; } CUDA_CALLABLE_KERNEL kernelGetRadialIndexFromR(Grid_t** pPtrGrid, resultClass<int>* pResult, gpuRayFloat_t R ) { pResult->v = (*pPtrGrid)->getRadialIndexFromR(R); } CUDA_CALLABLE_KERNEL kernelGetRadialIndexFromRSq(Grid_t** pPtrGrid, resultClass<int>* pResult, gpuRayFloat_t RSq ) { pResult->v = (*pPtrGrid)->getRadialIndexFromRSq(RSq); } CUDA_CALLABLE_KERNEL kernelSphericalGridIsIndexOutside(Grid_t** pGrid, resultClass<bool>* pResult, unsigned d, int index) { pResult->v = (*pGrid)->isIndexOutside(d,index); } CUDA_CALLABLE_KERNEL kernelSphericalGridIsOutside(Grid_t** pGrid, resultClass<bool>* pResult, int i, int j, int k ) { int indices[] = {i,j,k}; pResult->v = (*pGrid)->isOutside(indices); } class 
SphericalGridGPUTester { public: SphericalGridGPUTester(){ pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); pGrid->copyToGPU(); } ~SphericalGridGPUTester(){} int getRadialIndexFromR( gpuRayFloat_t R ) { using result_t = resultClass<int>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); hipLaunchKernelGGL(( kernelGetRadialIndexFromR), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, R ); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } int getRadialIndexFromRSq( gpuRayFloat_t RSq ) { using result_t = resultClass<int>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); hipLaunchKernelGGL(( kernelGetRadialIndexFromRSq), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, RSq ); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } unsigned getIndex( Position_t pos) { using result_t = resultClass<unsigned>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); hipLaunchKernelGGL(( kernelSphericalGridGetIndex), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, pos); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } bool isIndexOutside( unsigned d, int index ) { using result_t = resultClass<bool>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); hipLaunchKernelGGL(( kernelSphericalGridIsIndexOutside), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, d, index); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } bool isOutside( const int indices[]) { using result_t = resultClass<bool>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); hipLaunchKernelGGL(( kernelSphericalGridIsOutside), dim3(1),dim3(1), 0, 0, pGrid->ptrDevicePtr, pResult->devicePtr, indices[0], indices[1], indices[2]); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } gridTestData data; std::unique_ptr<Grid_t> pGrid; }; TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromR_outside ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 10, getRadialIndexFromR( 10.5 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromR_inside ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 9, getRadialIndexFromR( 9.5 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromR_insideOnVertex ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 9, getRadialIndexFromR( 9.0 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromR_center ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 0, getRadialIndexFromR( 0.0 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromRSq_outside ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 10, getRadialIndexFromRSq( 10.5*10.5 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromRSq_inside ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 9, getRadialIndexFromRSq( 9.5*9.5 ) ); 
} TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromRSq_insideOnVertex ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 9, getRadialIndexFromRSq( 9.0*9.0 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromRSq_center ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 0, getRadialIndexFromRSq( 0.0 ) ); } TEST_FIXTURE(SphericalGridGPUTester, isOutside_index ) { CHECK_EQUAL( true, isIndexOutside(0, 10) ); } TEST_FIXTURE(SphericalGridGPUTester, isOutside_Radius_false ) { CHECK_EQUAL( false, isIndexOutside(0, 9) ); } CUDA_CALLABLE_KERNEL kernelSphericalGridGetVolume(Grid_t** pGrid, resultClass<gpuRayFloat_t>* pResult, unsigned i ) { pResult->v = (*pGrid)->getVolume(i); } gpuRayFloat_t getVolume(Grid_t& grid, unsigned i ) { using result_t = resultClass<gpuRayFloat_t>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); hipLaunchKernelGGL(( kernelSphericalGridGetVolume), dim3(1),dim3(1), 0, 0, grid.ptrDevicePtr, pResult->devicePtr, i); gpuErrchk( hipPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } TEST( getVolume ) { pGridInfo_t* pGridInfo = new pGridInfo_t[3]; pGridInfo[0] = new GridBins_t(); pGridInfo[1] = new GridBins_t(); pGridInfo[2] = new GridBins_t(); std::vector<gpuRayFloat_t> vertices = { 1.0, 2.0, 3.0 }; pGridInfo[0]->initialize( vertices ); Grid_t grid(1,pGridInfo); grid.copyToGPU(); CHECK_CLOSE( (1.0)*(4.0/3.0)*pi, getVolume(grid, 0), 1e-5 ); CHECK_CLOSE( (8.0-1.0)*(4.0/3.0)*pi, getVolume(grid, 1), 1e-4 ); delete pGridInfo[0]; delete pGridInfo[1]; delete pGridInfo[2]; delete[] pGridInfo; } #endif } } // end namespace
dbc7ac0727eeb645220825c8dea7459bd29d7ec6.cu
#include <UnitTest++.h> #include <memory> #include <vector> #include <array> #include "MonteRay_SphericalGrid.hh" #include "MonteRay_SpatialGrid.hh" #include "MonteRayVector3D.hh" #include "MonteRayConstants.hh" #include "MonteRayCopyMemory.t.hh" using namespace MonteRay; namespace MonteRay_SphericalGrid_on_GPU_tester{ SUITE( MonteRay_SphericalGrid_GPU_basic_tests ) { #ifdef __CUDACC__ using Grid_t = MonteRay_SphericalGrid; using GridBins_t = MonteRay_GridBins; using GridBins_t = Grid_t::GridBins_t; using pGridInfo_t = GridBins_t*; using pArrayOfpGridInfo_t = Grid_t::pArrayOfpGridInfo_t; typedef MonteRay::Vector3D<gpuRayFloat_t> Position_t; class gridTestData { public: enum coord {R,DIM}; gridTestData(){ std::vector<gpuRayFloat_t> vertices = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; pGridInfo[0] = new GridBins_t(); pGridInfo[0]->initialize( vertices ); } ~gridTestData(){ delete pGridInfo[0]; } pArrayOfpGridInfo_t pGridInfo; }; template<typename T> class resultClass : public CopyMemoryBase<resultClass<T>> { public: using Base = CopyMemoryBase<resultClass>; resultClass() : CopyMemoryBase<resultClass<T>>() { init(); } ~resultClass(){} std::string className(){ return std::string("resultClass");} void init() { v = T(0); } void copyToGPU(void) { //std::cout << "Debug: resultClass::copyToGPU \n"; Base::copyToGPU(); } void copyToCPU(void) { //std::cout << "Debug: resultClass::copyToCPU \n"; Base::copyToCPU(); } void copy(const resultClass* rhs) { #ifndef NDEBUG if( this->debug ) { std::cout << "Debug: 1- resultClass::copy(const resultClass* rhs) \n"; } #endif if( this->isCudaIntermediate && rhs->isCudaIntermediate ) { throw std::runtime_error("resultClass::copy -- can NOT copy CUDA intermediate to CUDA intermediate."); } if( !this->isCudaIntermediate && !rhs->isCudaIntermediate ) { throw std::runtime_error("resultClass::copy -- can NOT copy CUDA non-intermediate to CUDA non-intermediate."); } v = rhs->v; } T v; }; // kernal call CUDA_CALLABLE_KERNEL kernelSphericalGridGetNumBins(Grid_t** pGrid, resultClass<unsigned>* pResult, unsigned d) { pResult->v = (*pGrid)->getNumBins(d); } TEST( getNumBins_on_GPU ) { enum coord {R,DIM}; gridTestData data; resultClass<unsigned>* pResult = new resultClass<unsigned>(); pResult->copyToGPU(); std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); pGrid->copyToGPU(); // Grid_t** devicePtr; // data.pGridInfo[X]->copyToGPU(); // data.pGridInfo[Y]->copyToGPU(); // data.pGridInfo[Z]->copyToGPU(); // createDeviceInstance<<<1,1>>>( devicePtr, data.pGridInfo[X]->devicePtr, data.pGridInfo[Y]->devicePtr, data.pGridInfo[Z]->devicePtr ); cudaStreamSynchronize(0); gpuErrchk( cudaPeekAtLastError() ); //printf( "Debug: devicePtr = %d\n", devicePtr ); kernelSphericalGridGetNumBins<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, 0); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 10, pResult->v ); pResult->v = 0; kernelSphericalGridGetNumBins<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, 1); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 0, pResult->v ); pResult->v = 0; kernelSphericalGridGetNumBins<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, 2); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 0, pResult->v ); delete pResult; } // // kernal call CUDA_CALLABLE_KERNEL kernelSphericalGridGetIndex(Grid_t** pGrid, resultClass<unsigned>* pResult, Position_t pos) { //printf("Debug: kernelSphericalGridGetIndex -- calling pGrid->getIndex(pos)\n"); unsigned index = 
(*pGrid)->getIndex(pos); pResult->v = index; } TEST( getIndex ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); pGrid->copyToGPU(); resultClass<unsigned>* pResult = new resultClass<unsigned>(); pResult->copyToGPU(); Position_t pos1( -0.5, -0.5, -0.5 ); Position_t pos2( -1.5, 0.0, 0.0 ); Position_t pos3( 2.5, 0.0, 0.0 ); Position_t pos4( 0.0, -3.5, 0.0 ); Position_t pos5( 0.0, 4.5, 0.0 ); Position_t pos6( 0.0, 0.0, -5.5 ); Position_t pos7( 0.0, 0.0, 6.5 ); Position_t pos8( 5.5, 5.5, 5.5 ); Position_t pos9( 10.0, 10.0, 10.0 ); kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos1); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 0, pResult->v ); pResult->v = 0; kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos2); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 1, pResult->v ); pResult->v = 0; kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos3); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 2, pResult->v ); pResult->v = 0; kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos4); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 3, pResult->v ); pResult->v = 0; kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos5); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 4, pResult->v ); pResult->v = 0; kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos6); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 5, pResult->v ); pResult->v = 0; kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos7); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 6, pResult->v ); pResult->v = 0; kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos8); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( 9, pResult->v ); pResult->v = 0; kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos9); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); CHECK_EQUAL( UINT_MAX, pResult->v ); pResult->v = 0; // delete pResult; } CUDA_CALLABLE_KERNEL kernelGetRadialIndexFromR(Grid_t** pPtrGrid, resultClass<int>* pResult, gpuRayFloat_t R ) { pResult->v = (*pPtrGrid)->getRadialIndexFromR(R); } CUDA_CALLABLE_KERNEL kernelGetRadialIndexFromRSq(Grid_t** pPtrGrid, resultClass<int>* pResult, gpuRayFloat_t RSq ) { pResult->v = (*pPtrGrid)->getRadialIndexFromRSq(RSq); } CUDA_CALLABLE_KERNEL kernelSphericalGridIsIndexOutside(Grid_t** pGrid, resultClass<bool>* pResult, unsigned d, int index) { pResult->v = (*pGrid)->isIndexOutside(d,index); } CUDA_CALLABLE_KERNEL kernelSphericalGridIsOutside(Grid_t** pGrid, resultClass<bool>* pResult, int i, int j, int k ) { int indices[] = {i,j,k}; pResult->v = (*pGrid)->isOutside(indices); } class SphericalGridGPUTester { public: SphericalGridGPUTester(){ pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); pGrid->copyToGPU(); } ~SphericalGridGPUTester(){} int getRadialIndexFromR( gpuRayFloat_t R ) { using result_t = resultClass<int>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); kernelGetRadialIndexFromR<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, R ); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } 
int getRadialIndexFromRSq( gpuRayFloat_t RSq ) { using result_t = resultClass<int>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); kernelGetRadialIndexFromRSq<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, RSq ); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } unsigned getIndex( Position_t pos) { using result_t = resultClass<unsigned>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); kernelSphericalGridGetIndex<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, pos); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } bool isIndexOutside( unsigned d, int index ) { using result_t = resultClass<bool>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); kernelSphericalGridIsIndexOutside<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, d, index); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } bool isOutside( const int indices[]) { using result_t = resultClass<bool>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); kernelSphericalGridIsOutside<<<1,1>>>( pGrid->ptrDevicePtr, pResult->devicePtr, indices[0], indices[1], indices[2]); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } gridTestData data; std::unique_ptr<Grid_t> pGrid; }; TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromR_outside ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 10, getRadialIndexFromR( 10.5 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromR_inside ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 9, getRadialIndexFromR( 9.5 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromR_insideOnVertex ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 9, getRadialIndexFromR( 9.0 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromR_center ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 0, getRadialIndexFromR( 0.0 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromRSq_outside ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 10, getRadialIndexFromRSq( 10.5*10.5 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromRSq_inside ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 9, getRadialIndexFromRSq( 9.5*9.5 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromRSq_insideOnVertex ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 9, getRadialIndexFromRSq( 9.0*9.0 ) ); } TEST_FIXTURE( SphericalGridGPUTester, getRadialIndexFromRSq_center ) { gridTestData data; std::unique_ptr<Grid_t> pGrid = std::unique_ptr<Grid_t>( new Grid_t(1,data.pGridInfo)); CHECK_EQUAL( 0, getRadialIndexFromRSq( 0.0 ) ); } TEST_FIXTURE(SphericalGridGPUTester, isOutside_index ) { CHECK_EQUAL( true, isIndexOutside(0, 10) ); } TEST_FIXTURE(SphericalGridGPUTester, isOutside_Radius_false ) { CHECK_EQUAL( false, isIndexOutside(0, 9) ); } 
CUDA_CALLABLE_KERNEL kernelSphericalGridGetVolume(Grid_t** pGrid, resultClass<gpuRayFloat_t>* pResult, unsigned i ) { pResult->v = (*pGrid)->getVolume(i); } gpuRayFloat_t getVolume(Grid_t& grid, unsigned i ) { using result_t = resultClass<gpuRayFloat_t>; std::unique_ptr<result_t> pResult = std::unique_ptr<result_t> ( new result_t() ); pResult->copyToGPU(); kernelSphericalGridGetVolume<<<1,1>>>( grid.ptrDevicePtr, pResult->devicePtr, i); gpuErrchk( cudaPeekAtLastError() ); pResult->copyToCPU(); return pResult->v; } TEST( getVolume ) { pGridInfo_t* pGridInfo = new pGridInfo_t[3]; pGridInfo[0] = new GridBins_t(); pGridInfo[1] = new GridBins_t(); pGridInfo[2] = new GridBins_t(); std::vector<gpuRayFloat_t> vertices = { 1.0, 2.0, 3.0 }; pGridInfo[0]->initialize( vertices ); Grid_t grid(1,pGridInfo); grid.copyToGPU(); CHECK_CLOSE( (1.0)*(4.0/3.0)*pi, getVolume(grid, 0), 1e-5 ); CHECK_CLOSE( (8.0-1.0)*(4.0/3.0)*pi, getVolume(grid, 1), 1e-4 ); delete pGridInfo[0]; delete pGridInfo[1]; delete pGridInfo[2]; delete[] pGridInfo; } #endif } } // end namespace
38daaba1188e20acfc8716462cf8147f3a452847.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <algorithm> #include <iterator> #include <set> #include <string> #include <GL/glew.h> #include <GL/wglew.h> #include <GL/freeglut.h> #include <SFML/System.hpp> #include <SFML/Window.hpp> #include <SFML/Graphics.hpp> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <device_launch_parameters.h> #include <cuda_gl_interop.h> #define NBody 16384 #define NB_THREAD 128 #define ScreenWidth 1024 #define ScreenHeight 768 #define invNBody (1.0f / NBody) //Variable de "vitesse de simulation" pour éviter une simulation explosive __global__ void NBodyCUDA(float2 *nvBodyPos, float2 *nvBodyV, float2 *nvBodyDest, float tempInvNbody) { int index = threadIdx.x + blockIdx.x * blockDim.x; //Step 3 Simuler l'interaction du body courant avec tous les autres bodies de nvBodyPos, ajouter à la vitesse, appliquer la vitesse float2 tempForce = { 0, 0 }; for(int i = 0; i < NBody; i++) { float intGrav = 0; intGrav = -1.0f * (0.00001f + (nvBodyPos[index].x - nvBodyPos[i].x)*(nvBodyPos[index].x - nvBodyPos[i].x) + (nvBodyPos[index].y - nvBodyPos[i].y)*(nvBodyPos[index].y - nvBodyPos[i].y)); tempForce.x += (nvBodyPos[index].x - nvBodyPos[i].x) / intGrav; tempForce.y += (nvBodyPos[index].y - nvBodyPos[i].y) / intGrav; } nvBodyV[index].x += tempForce.x * tempInvNbody; nvBodyV[index].y += tempForce.y * tempInvNbody; //Step 2 Copier la bonne donnée de nvBodyPos dans nvBodyDest nvBodyDest[index].x = nvBodyPos[index].x + nvBodyV[index].x; nvBodyDest[index].y = nvBodyPos[index].y + nvBodyV[index].y; } int main() { //Allocation des tableaux de départ float2* BodyPos = (float2 *)malloc(NBody * sizeof(float2)); float2* BodyV = (float2 *)malloc(NBody * sizeof(float2)); //Allocation des tableaux de données initiales float2* nvBodyPos; hipMalloc(&nvBodyPos, NBody * sizeof(float2)); float2* nvBodyV; hipMalloc(&nvBodyV, NBody * sizeof(float2)); float2* nvBodyDest; hipMalloc(&nvBodyDest, NBody * sizeof(float2)); //Verification d'erreur if (nvBodyPos == NULL || nvBodyDest == NULL || nvBodyV == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } //position initiale aléatoire srand(time(NULL)); for (int i = 0; i < NBody; i++) { BodyPos[i].x = (ScreenWidth / 2) + 600 * (-0.5 + (rand() / (float)RAND_MAX)); BodyPos[i].y = (ScreenHeight / 2) + 300 * (-0.5 + (rand() / (float)RAND_MAX)); } //Initialisation de SFML sf::VertexArray tempArray; tempArray.resize(NBody); tempArray.setPrimitiveType(sf::Points); sf::RenderWindow window(sf::VideoMode(1024, 768), "My window"); int blockSize = NB_THREAD; int gridSize = (NBody + blockSize - 1) / blockSize; while (window.isOpen()) { sf::Event event; while (window.pollEvent(event)) { } if (sf::Keyboard::isKeyPressed(sf::Keyboard::Escape)) exit(0); //Step 1 //Copie des positions vers le GPU hipMemcpy(nvBodyPos, BodyPos, NBody * sizeof(float2), hipMemcpyHostToDevice); //Lancement du kernel hipLaunchKernelGGL(( NBodyCUDA) , dim3(gridSize), dim3(blockSize) , 0, 0, nvBodyPos, nvBodyV, nvBodyDest, invNBody); //Récupération des nouvelles positions hipMemcpy(BodyPos, nvBodyDest, NBody * sizeof(float2), hipMemcpyDeviceToHost); //Copie des données dans la Vertex Array pour l'affichage for (int i = 0; i < NBody; i++) { tempArray[i].position.x = BodyPos[i].x; tempArray[i].position.y = BodyPos[i].y; } //Affichage window.clear(sf::Color::Black); window.draw(tempArray); window.display(); //exit(0); } }
38daaba1188e20acfc8716462cf8147f3a452847.cu
#include <stdio.h> #include <algorithm> #include <iterator> #include <set> #include <string> #include <GL/glew.h> #include <GL/wglew.h> #include <GL/freeglut.h> #include <SFML/System.hpp> #include <SFML/Window.hpp> #include <SFML/Graphics.hpp> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> #include <curand.h> #include <device_launch_parameters.h> #include <cuda_gl_interop.h> #define NBody 16384 #define NB_THREAD 128 #define ScreenWidth 1024 #define ScreenHeight 768 #define invNBody (1.0f / NBody) //Variable de "vitesse de simulation" pour éviter une simulation explosive __global__ void NBodyCUDA(float2 *nvBodyPos, float2 *nvBodyV, float2 *nvBodyDest, float tempInvNbody) { int index = threadIdx.x + blockIdx.x * blockDim.x; //Step 3 Simuler l'interaction du body courant avec tous les autres bodies de nvBodyPos, ajouter à la vitesse, appliquer la vitesse float2 tempForce = { 0, 0 }; for(int i = 0; i < NBody; i++) { float intGrav = 0; intGrav = -1.0f * (0.00001f + (nvBodyPos[index].x - nvBodyPos[i].x)*(nvBodyPos[index].x - nvBodyPos[i].x) + (nvBodyPos[index].y - nvBodyPos[i].y)*(nvBodyPos[index].y - nvBodyPos[i].y)); tempForce.x += (nvBodyPos[index].x - nvBodyPos[i].x) / intGrav; tempForce.y += (nvBodyPos[index].y - nvBodyPos[i].y) / intGrav; } nvBodyV[index].x += tempForce.x * tempInvNbody; nvBodyV[index].y += tempForce.y * tempInvNbody; //Step 2 Copier la bonne donnée de nvBodyPos dans nvBodyDest nvBodyDest[index].x = nvBodyPos[index].x + nvBodyV[index].x; nvBodyDest[index].y = nvBodyPos[index].y + nvBodyV[index].y; } int main() { //Allocation des tableaux de départ float2* BodyPos = (float2 *)malloc(NBody * sizeof(float2)); float2* BodyV = (float2 *)malloc(NBody * sizeof(float2)); //Allocation des tableaux de données initiales float2* nvBodyPos; cudaMalloc(&nvBodyPos, NBody * sizeof(float2)); float2* nvBodyV; cudaMalloc(&nvBodyV, NBody * sizeof(float2)); float2* nvBodyDest; cudaMalloc(&nvBodyDest, NBody * sizeof(float2)); //Verification d'erreur if (nvBodyPos == NULL || nvBodyDest == NULL || nvBodyV == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } //position initiale aléatoire srand(time(NULL)); for (int i = 0; i < NBody; i++) { BodyPos[i].x = (ScreenWidth / 2) + 600 * (-0.5 + (rand() / (float)RAND_MAX)); BodyPos[i].y = (ScreenHeight / 2) + 300 * (-0.5 + (rand() / (float)RAND_MAX)); } //Initialisation de SFML sf::VertexArray tempArray; tempArray.resize(NBody); tempArray.setPrimitiveType(sf::Points); sf::RenderWindow window(sf::VideoMode(1024, 768), "My window"); int blockSize = NB_THREAD; int gridSize = (NBody + blockSize - 1) / blockSize; while (window.isOpen()) { sf::Event event; while (window.pollEvent(event)) { } if (sf::Keyboard::isKeyPressed(sf::Keyboard::Escape)) exit(0); //Step 1 //Copie des positions vers le GPU cudaMemcpy(nvBodyPos, BodyPos, NBody * sizeof(float2), cudaMemcpyHostToDevice); //Lancement du kernel NBodyCUDA <<<gridSize, blockSize >>> (nvBodyPos, nvBodyV, nvBodyDest, invNBody); //Récupération des nouvelles positions cudaMemcpy(BodyPos, nvBodyDest, NBody * sizeof(float2), cudaMemcpyDeviceToHost); //Copie des données dans la Vertex Array pour l'affichage for (int i = 0; i < NBody; i++) { tempArray[i].position.x = BodyPos[i].x; tempArray[i].position.y = BodyPos[i].y; } //Affichage window.clear(sf::Color::Black); window.draw(tempArray); window.display(); //exit(0); } }
cedab538dec7eb3e3a7154ff99868f8f891b66bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" namespace cv { namespace gpu { namespace device { namespace imgproc { template <typename T, typename B> __global__ void pyrDown(const PtrStepSz<T> src, PtrStep<T> dst, const B b, int dst_cols) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_t; __shared__ work_t smem[256 + 4]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y; const int src_y = 2 * y; if (src_y >= 2 && src_y < src.rows - 2 && x >= 2 && x < src.cols - 2) { { work_t sum; sum = 0.0625f * src(src_y - 2, x); sum = sum + 0.25f * src(src_y - 1, x); sum = sum + 0.375f * src(src_y , x); sum = sum + 0.25f * src(src_y + 1, x); sum = sum + 0.0625f * src(src_y + 2, x); smem[2 + threadIdx.x] = sum; } if (threadIdx.x < 2) { const int left_x = x - 2; work_t sum; sum = 0.0625f * src(src_y - 2, left_x); sum = sum + 0.25f * src(src_y - 1, left_x); sum = sum + 0.375f * src(src_y , left_x); sum = sum + 0.25f * src(src_y + 1, left_x); sum = sum + 0.0625f * src(src_y + 2, left_x); smem[threadIdx.x] = sum; } if (threadIdx.x > 253) { const int right_x = x + 2; work_t sum; sum = 0.0625f * src(src_y - 2, right_x); sum = sum + 0.25f * src(src_y - 1, right_x); sum = sum + 0.375f * src(src_y , right_x); sum = sum + 0.25f * src(src_y + 1, right_x); sum = sum + 0.0625f * src(src_y + 2, right_x); smem[4 + threadIdx.x] = sum; } } else { { work_t sum; sum = 0.0625f * src(b.idx_row_low (src_y - 2), b.idx_col_high(x)); sum = sum + 0.25f * src(b.idx_row_low (src_y - 1), b.idx_col_high(x)); sum = sum + 0.375f * src(src_y , b.idx_col_high(x)); sum = sum + 0.25f * src(b.idx_row_high(src_y + 1), b.idx_col_high(x)); sum = sum + 0.0625f * src(b.idx_row_high(src_y + 2), b.idx_col_high(x)); smem[2 + threadIdx.x] = sum; } if (threadIdx.x < 2) { const int left_x = x - 2; work_t sum; sum = 0.0625f * src(b.idx_row_low (src_y - 2), b.idx_col(left_x)); sum = sum + 0.25f * src(b.idx_row_low (src_y - 1), b.idx_col(left_x)); sum = sum + 0.375f * src(src_y , b.idx_col(left_x)); sum = sum + 0.25f * src(b.idx_row_high(src_y + 1), b.idx_col(left_x)); sum = sum + 0.0625f * src(b.idx_row_high(src_y + 2), b.idx_col(left_x)); smem[threadIdx.x] = sum; } if (threadIdx.x > 253) { const int right_x = x + 2; work_t sum; sum = 0.0625f * src(b.idx_row_low (src_y - 2), b.idx_col_high(right_x)); sum = sum + 0.25f * src(b.idx_row_low (src_y - 1), b.idx_col_high(right_x)); sum = sum + 0.375f * src(src_y , b.idx_col_high(right_x)); sum = sum + 0.25f * src(b.idx_row_high(src_y + 1), b.idx_col_high(right_x)); sum = sum + 0.0625f * src(b.idx_row_high(src_y + 2), b.idx_col_high(right_x)); smem[4 + threadIdx.x] = sum; } } __syncthreads(); if (threadIdx.x < 128) { const int tid2 = threadIdx.x * 2; work_t sum; sum = 0.0625f * smem[2 + tid2 - 2]; sum = sum + 0.25f * smem[2 + tid2 - 1]; sum = sum + 0.375f * smem[2 + tid2 ]; sum = sum + 0.25f * smem[2 + tid2 + 1]; sum = sum + 0.0625f * smem[2 + tid2 + 2]; const int dst_x = (blockIdx.x * blockDim.x + tid2) / 2; if (dst_x < dst_cols) dst.ptr(y)[dst_x] = saturate_cast<T>(sum); } } template <typename T, template <typename> class B> void pyrDown_caller(PtrStepSz<T> src, PtrStepSz<T> dst, hipStream_t stream) { const dim3 block(256); const dim3 grid(divUp(src.cols, block.x), dst.rows); B<T> b(src.rows, src.cols); hipLaunchKernelGGL(( pyrDown<T>), 
dim3(grid), dim3(block), 0, stream, src, dst, b, dst.cols); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <typename T> void pyrDown_gpu(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream) { pyrDown_caller<T, BrdReflect101>(static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(dst), stream); } template void pyrDown_gpu<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<uchar2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<uchar3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<uchar4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<char2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<char3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<char4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<ushort2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<ushort3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<ushort4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<short2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<short3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<short4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<int2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<int3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<int4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrDown_gpu<float2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<float3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrDown_gpu<float4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); } // namespace imgproc }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
cedab538dec7eb3e3a7154ff99868f8f891b66bd.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" namespace cv { namespace gpu { namespace device { namespace imgproc { template <typename T, typename B> __global__ void pyrDown(const PtrStepSz<T> src, PtrStep<T> dst, const B b, int dst_cols) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_t; __shared__ work_t smem[256 + 4]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y; const int src_y = 2 * y; if (src_y >= 2 && src_y < src.rows - 2 && x >= 2 && x < src.cols - 2) { { work_t sum; sum = 0.0625f * src(src_y - 2, x); sum = sum + 0.25f * src(src_y - 1, x); sum = sum + 0.375f * src(src_y , x); sum = sum + 0.25f * src(src_y + 1, x); sum = sum + 0.0625f * src(src_y + 2, x); smem[2 + threadIdx.x] = sum; } if (threadIdx.x < 2) { const int left_x = x - 2; work_t sum; sum = 0.0625f * src(src_y - 2, left_x); sum = sum + 0.25f * src(src_y - 1, left_x); sum = sum + 0.375f * src(src_y , left_x); sum = sum + 0.25f * src(src_y + 1, left_x); sum = sum + 0.0625f * src(src_y + 2, left_x); smem[threadIdx.x] = sum; } if (threadIdx.x > 253) { const int right_x = x + 2; work_t sum; sum = 0.0625f * src(src_y - 2, right_x); sum = sum + 0.25f * src(src_y - 1, right_x); sum = sum + 0.375f * src(src_y , right_x); sum = sum + 0.25f * src(src_y + 1, right_x); sum = sum + 0.0625f * src(src_y + 2, right_x); smem[4 + threadIdx.x] = sum; } } else { { work_t sum; sum = 0.0625f * src(b.idx_row_low (src_y - 2), b.idx_col_high(x)); sum = sum + 0.25f * src(b.idx_row_low (src_y - 1), b.idx_col_high(x)); sum = sum + 0.375f * src(src_y , b.idx_col_high(x)); sum = sum + 0.25f * src(b.idx_row_high(src_y + 1), b.idx_col_high(x)); sum = sum + 0.0625f * src(b.idx_row_high(src_y + 2), b.idx_col_high(x)); smem[2 + threadIdx.x] = sum; } if (threadIdx.x < 2) { const int left_x = x - 2; work_t sum; sum = 0.0625f * src(b.idx_row_low (src_y - 2), b.idx_col(left_x)); sum = sum + 0.25f * src(b.idx_row_low (src_y - 1), b.idx_col(left_x)); sum = sum + 0.375f * src(src_y , b.idx_col(left_x)); sum = sum + 0.25f * src(b.idx_row_high(src_y + 1), b.idx_col(left_x)); sum = sum + 0.0625f * src(b.idx_row_high(src_y + 2), b.idx_col(left_x)); smem[threadIdx.x] = sum; } if (threadIdx.x > 253) { const int right_x = x + 2; work_t sum; sum = 0.0625f * src(b.idx_row_low (src_y - 2), b.idx_col_high(right_x)); sum = sum + 0.25f * src(b.idx_row_low (src_y - 1), b.idx_col_high(right_x)); sum = sum + 0.375f * src(src_y , b.idx_col_high(right_x)); sum = sum + 0.25f * src(b.idx_row_high(src_y + 1), b.idx_col_high(right_x)); sum = sum + 0.0625f * src(b.idx_row_high(src_y + 2), b.idx_col_high(right_x)); smem[4 + threadIdx.x] = sum; } } __syncthreads(); if (threadIdx.x < 128) { const int tid2 = threadIdx.x * 2; work_t sum; sum = 0.0625f * smem[2 + tid2 - 2]; sum = sum + 0.25f * smem[2 + tid2 - 1]; sum = sum + 0.375f * smem[2 + tid2 ]; sum = sum + 0.25f * smem[2 + tid2 + 1]; sum = sum + 0.0625f * smem[2 + tid2 + 2]; const int dst_x = (blockIdx.x * blockDim.x + tid2) / 2; if (dst_x < dst_cols) dst.ptr(y)[dst_x] = saturate_cast<T>(sum); } } template <typename T, template <typename> class B> void pyrDown_caller(PtrStepSz<T> src, PtrStepSz<T> dst, cudaStream_t stream) { const dim3 block(256); const dim3 grid(divUp(src.cols, block.x), dst.rows); B<T> b(src.rows, src.cols); pyrDown<T><<<grid, block, 0, stream>>>(src, 
dst, b, dst.cols); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <typename T> void pyrDown_gpu(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) { pyrDown_caller<T, BrdReflect101>(static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(dst), stream); } template void pyrDown_gpu<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<uchar2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<uchar3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<uchar4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<char2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<char3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<char4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<ushort2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<ushort3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<ushort4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<short2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<short3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<short4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<int2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<int3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<int4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrDown_gpu<float2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<float3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrDown_gpu<float4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); } // namespace imgproc }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */